content
stringlengths 39
14.9k
| sha1
stringlengths 40
40
| id
int64 0
710k
|
---|---|---|
from typing import Sequence
def is_overlapping_lane_seq(lane_seq1: Sequence[int], lane_seq2: Sequence[int]) -> bool:
    """Return True when the two lane-id sequences overlap.

    Overlap is defined as::
        s1------s2-----------------e1--------e2
    (lane_seq2 starts somewhere on lane_seq1 and ends after it), OR::
        s1------s2-----------------e2--------e1
    (lane_seq2 starts somewhere on lane_seq1 and ends before it).

    Args:
        lane_seq1: list of lane ids
        lane_seq2: list of lane ids

    Returns:
        True if the lane sequences overlap, else False.
    """
    starts_inside = lane_seq2[0] in lane_seq1[1:]
    end_covered = lane_seq1[-1] in lane_seq2[:-1]
    if starts_inside and end_covered:
        return True
    # Full containment also counts as overlap.
    return set(lane_seq2).issubset(set(lane_seq1))
def output_handler(data, context):
    """Post-process a TensorFlow Serving response before returning it to the client.

    Args:
        data (obj): the TensorFlow serving response
        context (Context): an object containing request and configuration details

    Returns:
        (bytes, string): data to return to client, response content type

    Raises:
        ValueError: when the serving response status is not 200.
    """
    if data.status_code != 200:
        raise ValueError(data.content.decode('utf-8'))
    return data.content, context.accept_header
def largest_rectangle(h):
    """Hackerrank Problem: https://www.hackerrank.com/challenges/largest-rectangle/problem

    Find the largest solid rectangular area that fits under the building
    skyline, i.e. max over i of h_min * width for every contiguous span.

    Uses a monotonic stack, O(n), instead of the original O(n^2) scan that
    re-walked left and right from every building.

    Args:
        h (list): List of building heights

    Returns:
        (int): The largest area formed from the buildings
    """
    largest_area = 0
    stack = []  # (start_index, height), heights strictly increasing
    for i, height in enumerate(h):
        start = i
        # Any taller bar on the stack cannot extend past i; settle its area.
        while stack and stack[-1][1] > height:
            idx, bar = stack.pop()
            largest_area = max(largest_area, bar * (i - idx))
            # The current (shorter) bar extends back to where that bar began.
            start = idx
        stack.append((start, height))
    # Remaining bars extend to the end of the skyline.
    n = len(h)
    for idx, bar in stack:
        largest_area = max(largest_area, bar * (n - idx))
    return largest_area
def fahr2cel(t):
    """Convert a temperature from degrees Fahrenheit to degrees Celsius.

    Inputs:
        t: temperature, in degrees Fahrenheit
    Returns:
        Temperature, in C
    """
    shifted = t - 32
    return shifted * 5 / 9
def to_int(value):
    """Convert *value* to an integer, returning 0 when conversion fails."""
    try:
        result = int(value)
    except (TypeError, ValueError):
        result = 0
    return result
import random
def rand_sampling(ratio, stop, start=1):
    """
    Randomly sample from the closed interval [start, stop].

    Args:
        ratio (float): percentage of the interval to sample, in [0, 100]
        stop (int): upper bound of sampling interval (inclusive)
        start (int): lower bound of sampling interval (inclusive)

    Returns:
        A sorted list of sampled integers.
    """
    assert 0 <= ratio and ratio <= 100, 'The ratio must be between 0 and 100.'
    assert stop >= start, 'Invalid interval'
    sample_pool = list(range(start, stop + 1))
    # ratio is documented (and validated) as a percentage, so scale by 1/100.
    # The original multiplied by the raw ratio, which asked random.sample for
    # more elements than the pool holds for any ratio > 1.
    select_num = int(len(sample_pool) * ratio / 100)
    return sorted(random.sample(sample_pool, select_num))
def contain_same_digit(a, b):
    """
    Test whether numbers a and b are written with exactly the same digits.

    Digit multiplicity matters: 112 and 121 match, but 112 and 122 do not.
    (The original one-directional membership test ignored repeat counts,
    so e.g. 112 vs 122 was wrongly reported as True.)
    """
    return sorted(str(a)) == sorted(str(b))
def parse_range(string):
    """
    Parse an IP range for the args parser.

    :param string: formatted string X.X.X.X-Y.Y.Y.Y
    :return: list holding one (start, end) tuple
    """
    pieces = string.split("-")
    start, end = pieces[0], pieces[1]
    return [(start, end)]
import requests
import re
def get_title(url: str, timeout: float = 10.0):
    """Fetch the title of the web page at *url* and build a markdown link.

    Args:
        url: page to fetch.
        timeout: request timeout in seconds (new parameter, defaults to 10
            so a hung server no longer blocks forever).

    Returns:
        Markdown-formatted link string "[title](url)".
    """
    html_source = requests.get(url, timeout=timeout).text
    # re.DOTALL lets the title match even when it spans multiple lines.
    title = re.findall('<title>(.*?)</title>', html_source, re.DOTALL)[0].strip()
    return f"[{title}]({url})"
def item_cost_entry() -> float:
    """Prompt for item costs until 0 is entered; return their sum."""
    print('\nENTER ITEMS (ENTER 0 TO END)')
    total: float = 0.0
    while True:
        entry: float = float(input('Cost of item: '))
        if entry == 0:
            break
        total += entry
    return total
def make_message(name):
    """Construct a welcoming message for *name*. Input: string, Output: string."""
    greeting = f"Good morning, {name}! Nice to see you."
    return greeting
def _parse_vertex_tuple(s):
"""Parse vertex indices in '/' separated form (like 'i/j/k', 'i//k' ...)."""
vt = [0, 0, 0]
for i, c in enumerate(s.split('/')):
if c:
vt[i] = int(c)
return tuple(vt) | e83182401b6443726660caf3688008f6209aed13 | 701,848 |
import torch
def tfidf_transform(tfidf_vectorizer, corpus_data, cuda0):
    """
    Apply TFIDF transformation to test data.

    Args:
        tfidf_vectorizer (object): sklearn-style tfidf vectorizer
        corpus_data: DataFrame-like object with a "text" column of documents
        cuda0: torch device on which the resulting tensor is placed

    Returns:
        X_test (torch.Tensor): transposed tfidf word-document matrix of the data
    """
    corpus_list = corpus_data["text"].tolist()
    # NOTE(review): fit_transform re-fits the vectorizer on this corpus, even
    # though the docstring describes applying an already-trained vectorizer to
    # test data; transform() may be intended -- confirm with callers.
    tfidf_vector = tfidf_vectorizer.fit_transform(corpus_list)
    # Densify, move to the requested device, and transpose so rows are terms
    # and columns are documents.
    X_test = torch.t(torch.tensor(tfidf_vector.todense(), dtype=torch.float64, device=cuda0))
    return X_test
def numStationsNotIgnored(observations):
    """ Take a list of ObservedPoints and return the number of stations that are actually used
    and are not ignored in the solution.

    Arguments:
        observations: [list] A list of ObservedPoints objects.

    Return:
        [int] Number of stations that are used in the solution.
    """
    # sum() avoids building a throwaway list, and "not obs.ignore_station"
    # replaces the non-idiomatic "== False" comparison.
    return sum(1 for obs in observations if not obs.ignore_station)
def get_mapshape_from_searchmap(hashtable):
    """Given keys of the form (x, y), return max(x)+1, max(y)+1.

    The key (max(x), max(y)) itself need not exist in the dict.

    Args:
        hashtable(dict): key-value pairs keyed by 2-tuples

    Returns:
        int, int: dimensions covering all keys
    """
    rows = [key[0] for key in hashtable]
    cols = [key[1] for key in hashtable]
    return max(rows) + 1, max(cols) + 1
import torch
def update_weights(batch, batch_dic):
    """
    Re-normalize conformer weights so they sum to 1.

    Args:
        batch (dict): Batch dictionary containing a "weights" tensor
        batch_dic (dict): Dictionary with extra conformer information,
            including the "conf_idx" selection

    Returns:
        torch.Tensor: renormalized weights for the selected conformers
    """
    conf_idx = torch.LongTensor(batch_dic["conf_idx"])
    selected = batch["weights"][conf_idx]
    selected = selected / selected.sum()
    # Degenerate case (e.g. all-zero weights): fall back to uniform weights.
    if torch.isnan(selected).any():
        selected = torch.ones_like(batch["weights"][conf_idx])
        selected = selected / selected.sum()
    return selected
def getRuleCount(lstRules, policy_name):
    """
    Return the rule count for the policy named *policy_name*.

    Parameters:
        - IN : 1. List containing all the rules (CSV strings, policy first)
               2. Name of the policy
        - Out: # of rules in the policy.
    """
    return sum(1 for rule in lstRules if rule.split(',')[0] == policy_name)
def send_format(catfact):
    """
    Format the given catfact into the message-body string that gets sent.

    The leading/trailing newlines, the code fence and the trademark sign are
    all part of the outgoing message and must be preserved exactly.
    """
    return """
Thank you for subscribing to CatFacts™
Did you know:
```
{}```
Type "UNSUBSCRIBE" to unsubscribe from future catfacts.
""".format(catfact)
def addstr(str1, str2):
    """Concatenate the string forms of str1 and str2."""
    combined = "{}{}".format(str1, str2)
    return combined
def get_word_pattern(word: str) -> str:
    """ Get word pattern.

    Each distinct character is replaced by its first-appearance order, so
    e.g. "hello" -> "0.1.2.2.3". Useful to break substitution ciphers.

    :param word: Word to get pattern for.
    :return: Word pattern.
    """
    # Dict insertion order (guaranteed since 3.7) acts as an ordered set.
    first_seen = {}
    for symbol in word:
        if symbol not in first_seen:
            first_seen[symbol] = len(first_seen)
    return ".".join(str(first_seen[symbol]) for symbol in word)
import hashlib
def sha512_first_half(message: bytes) -> bytes:
    """
    Return the first 32 bytes of the SHA-512 hash of *message*.

    Args:
        message: Bytes input to hash.

    Returns:
        The first 32 bytes of the SHA-512 digest.
    """
    full_digest = hashlib.sha512(message).digest()
    return full_digest[:32]
def median(arr: list):
    """
    Return the median of *arr* together with the index/indices used.

    Even-length lists yield the mean of the two middle values and both middle
    indices; odd-length lists yield the single middle value and its index.
    """
    size = len(arr)
    mid = size // 2
    if size % 2:
        indices = [mid]
        value = arr[mid]
    else:
        indices = [mid - 1, mid]  # -1 because indexing starts at 0
        value = (arr[mid - 1] + arr[mid]) / 2
    return value, indices
def is_(a: object, b: object) -> bool:
    """
    Return ``a is b``, for _a_ and _b_.

    Example:
        >>> is_(object(), object())
        False

    Args:
        a: left element of is expression
        b: right element of is expression

    Return:
        `True` if `a is b`, `False` otherwise
    """
    identical = a is b
    return identical
def _set_default_contact_rating(contact_rating_id: int, type_id: int) -> int:
"""Set the default contact rating for mechanical relays.
:param contact_form_id: the current contact rating ID.
:param type_id: the type ID of the relay with missing defaults.
:return: _contact_rating_id
:rtype: int
"""
if contact_rating_id > 0:
return contact_rating_id
return {1: 2, 2: 4, 3: 2, 4: 1, 5: 2, 6: 2}[type_id] | e39f5f9701d4314cd19109ce08c599b3737cd064 | 701,880 |
import json
def _read_color_map(path, object_hook=None):
"""
Read a color map as json.
:param path (str): The path to read the map from.
:param object_hook (func): A Function to manipulate the json.
:return: A dictionary of color map.
"""
with open(path) as f:
return json.load(f, object_hook=object_hook) | 34c627443cd418d84b19bd54b3e79427d8168b1e | 701,881 |
def find_page(pages, limit, value):
    """
    Return the 1-based page on which item *value* falls in a paginated result.

    :param pages: total number of pages
    :param limit: items per page
    :param value: 1-based item position
    :return: page number, or None when value lies beyond the last page
    """
    for page in range(1, pages + 1):
        if value <= limit * page:
            return page
    return None
def compare(guess, answer):
    """
    Compare guess and answer for a Bulls-and-Cows round.

    Arguments:
        guess -- a 4-digital number string of the guess.
        answer -- a 4-digital number string of right answer.

    Returns:
        cow -- a number of the user guessed correctly in the correct place.
        bull -- a number of the user guessed correctly in the wrong place.
    """
    cow = sum(1 for g, a in zip(guess, answer) if g == a)
    present = sum(1 for g in guess if g in answer)
    bull = present - cow
    return cow, bull
def _get_parent_node_by_pred(node, pred, search_current=False):
"""Find the first parent node that satisfies a predicate function."""
if not search_current:
node = node.parent
while node is not None:
if pred(node):
return node
node = node.parent
return None | a69be92c468758ea1faf366c2881588e8f6fd688 | 701,888 |
def float_like(x, /) -> bool:
    """
    Test whether an object can be converted to a float.

    Args:
        x (Any): object to test

    Returns:
        bool: Whether the object can be converted to a float.
    """
    try:
        float(x)
    except (TypeError, ValueError):
        # float(None) / float([]) raise TypeError, which the original let
        # escape even though the contract is a boolean answer.
        return False
    return True
def parse_exclusion_file(exclusion_file, exclusion_column):
    """
    Read the specified tab-separated column of the specified file into a set.

    Args:
        exclusion_file: path of the TSV file to read.
        exclusion_column: 0-based index of the column to collect.

    Returns:
        set: the values found in that column, one per line.
    """
    exclusion_list = set()
    with open(exclusion_file) as infile:
        for line in infile:
            # rstrip('\n') so the last column does not keep its newline --
            # previously those entries could never match anything.
            to_exclude = line.rstrip('\n').split('\t')[exclusion_column]
            exclusion_list.add(to_exclude)
    return exclusion_list
from typing import Dict
def encode_token_tx(token_tx: Dict) -> bytes:
    """
    Create the bytes representation of token transaction data.

    args:
        token_tx: Dictionary containing the token transaction data.

    returns:
        Bytes to be saved as token value in DB.
    """
    # Field order defines the on-disk layout; each value is NUL-terminated.
    fields = ('tokenAddress', 'addressFrom', 'addressTo',
              'value', 'transactionHash', 'timestamp')
    token_tx_str = ''.join(token_tx[field] + '\0' for field in fields)
    return token_tx_str.encode()
def distance(strand_a, strand_b):
    """
    Return the Hamming distance between two equal-length DNA strands.

    :param strand_a string - String representing a strand of DNA.
    :param strand_b string - String representing a different strand of DNA.
    :return int - number of differences between 2 strands.
    """
    if len(strand_a) != len(strand_b):
        raise ValueError("The Hamming distance is only defined for sequences of equal length, "
                         " so an attempt to calculate it between sequences of different lengths should not work.")
    return sum(1 for base_a, base_b in zip(strand_a, strand_b) if base_a != base_b)
def clamp(x, inf=0, sup=1):
    """Clamps x in the range [inf, sup]."""
    if x < inf:
        return inf
    if x > sup:
        return sup
    return x
def _get_item_kind(item):
"""Return (kind, isunittest) for the given item."""
try:
itemtype = item.kind
except AttributeError:
itemtype = item.__class__.__name__
if itemtype == 'DoctestItem':
return 'doctest', False
elif itemtype == 'Function':
return 'function', False
elif itemtype == 'TestCaseFunction':
return 'function', True
elif item.hasattr('function'):
return 'function', False
else:
return None, False | c597db3de4447c68f3d8187e2f988e6c81e19d00 | 701,902 |
def mapattr(value, arg):
    """
    Maps an attribute from a list into a new list.

    e.g. value = [obj(a=1), obj(a=2), obj(a=3)]
         arg = 'a'
         result = [1, 2, 3]
    """
    # The comprehension already yields [] for an empty input, so the explicit
    # len() guard (and its duplicated return path) in the original was
    # redundant.
    return [getattr(o, arg) for o in value]
def getTagNames(domain):
    """
    Returns a list of tag names used by the domain.

    :param domain: a domain object
    :type domain: `escript.Domain`
    :return: a list of tag names used by the domain
    :rtype: ``list`` of ``str``
    """
    raw_names = domain.showTagNames().split(",")
    return [name.strip() for name in raw_names]
def num_added_features(include_am, include_lm):
    """ Determine the number of added word-level features (specifically AM and LM) """
    return int(bool(include_am)) + int(bool(include_lm))
def retrieve_cnv_data(store, solution, chromosome=''):
    """ Retrieve copy number data for a specific solution.

    Optionally restrict the result to one chromosome; a ``segment_idx``
    column mirroring the original index is added to the returned frame.
    """
    cnv = store['solutions/solution_{0}/cn'.format(solution)]
    if chromosome != '':
        chrom_mask = cnv['chromosome'] == chromosome
        cnv = cnv[chrom_mask].copy()
    cnv['segment_idx'] = cnv.index
    return cnv
def banner(text: str, *, borderChar: str = '='):
    """Print 'text' as banner, optionally customise 'borderChar'."""
    edge = borderChar * len(text)
    return f"{edge}\n{text}\n{edge}"
def parse_turn(turn):
    """Parse the input from the user for valid play positions.

    Args:
        turn (string): Input string from the user that contains the played
            position (0-8)

    Returns:
        (int/None): Returns integer on success or None on failure
    """
    try:
        position = int(turn)
    except (TypeError, ValueError):
        # Only conversion failures are expected here; the original caught
        # every Exception (and converted int(turn) three separate times).
        print("Could not parse position")
        return None
    if position > 8 or position < 0:
        print("Position must be 0-8")
        return None
    return position
def phonenumber(anon, obj, field, val):
    """
    Generates a random US-style phone number via the faker held by *anon*.
    """
    faker = anon.faker
    return faker.phone_number(field=field)
def noop(obs):
    """Identity transform: returns *obs* unchanged."""
    return obs
def add(a, b):
    """
    Return the sum of the given numbers.
    """
    total = a + b
    return total
from typing import Union
import yaml
def read_yaml(path: str) -> Union[dict, list]:
    """Load the YAML document stored at the given path.

    Args:
        path (str): path to yaml file

    Returns:
        Union[dict, list]: dictionary or list loaded from yaml depending on the yaml
    """
    with open(path, encoding="UTF-8") as yaml_file:
        document = yaml_file.read()
    return yaml.safe_load(document)
def is_legal(x, y, img):
    """
    Check if (x, y) is a valid coordinate in img.

    Args:
        x (int): x-coordinate
        y (int): y-coordinate
        img (numpy.array): Image

    Returns:
        bool -> True if valid, False otherwise
    """
    height, width = img.shape[0], img.shape[1]
    return 0 <= x < width and 0 <= y < height
def rates_for_yr(rates_all_years, sim_year):
    """
    Filter specific rates for a given year.

    Parameters
    ----------
    rates_all_years : pandas DataFrame
        rates, to be filtered by year
    sim_year : int
        year being simulated

    Returns
    -------
    pandas DataFrame
        rates for the given year
    """
    year_mask = rates_all_years['yr'] == sim_year
    return rates_all_years[year_mask]
def parse_output(output_file):
    """Parse output file for surface-area data.

    Args:
        output_file (str): path to simulation output file.

    Returns:
        results (dict): total unit cell, gravimetric, and volumetric surface
        areas.
    """
    results = {}
    seen = 0  # how many "Surface area" lines have been consumed so far
    with open(output_file) as origin:
        for line in origin:
            if "Surface area" not in line:
                continue
            area_value = float(line.split()[2])
            if seen == 0:
                results['sa_unit_cell_surface_area'] = area_value
                seen = 1
            elif seen == 1:
                results['sa_gravimetric_surface_area'] = area_value
                seen = 2
            else:
                results['sa_volumetric_surface_area'] = area_value
                print(
                    "\nSURFACE AREA\n" +
                    "%s\tA^2\n" % (results['sa_unit_cell_surface_area']) +
                    "%s\tm^2/g\n" % (results['sa_gravimetric_surface_area']) +
                    "%s\tm^2/cm^3" % (results['sa_volumetric_surface_area']))
    return results
import json
def load_label(label_path):
    """
    Load a label stored as JSON at *label_path*.
    """
    with open(label_path, 'r') as label_file:
        raw = label_file.read()
    return json.loads(raw)
def resize(anns, size, output_size):
    """
    Scale each annotation's ``bbox`` from *size* to *output_size*.

    Parameters
    ----------
    anns : List[Dict]
        Sequences of annotation of objects, containing `bbox` of [l, t, w, h].
    size : Sequence[int]
        Size of the original image.
    output_size : Union[Number, Sequence[int]]
        Desired output size. If a sequence like (w, h), the output size is
        matched exactly. If an int, the smaller edge of the image is matched
        to it, maintaining the aspect ratio.
    """
    w, h = size
    if isinstance(output_size, int):
        # Shorter edge already matches: nothing to scale.
        if (w <= h and w == output_size) or (h <= w and h == output_size):
            return anns
        scale = output_size / min(w, h)
        sw = sh = scale
    else:
        ow, oh = output_size
        sw, sh = ow / w, oh / h

    scaled = []
    for ann in anns:
        left, top, box_w, box_h = ann['bbox']
        new_bbox = [left * sw, top * sh, box_w * sw, box_h * sh]
        scaled.append({**ann, "bbox": new_bbox})
    return scaled
def has_overlap(x0, xd, y0, yd):
    """Return True if the ranges overlap.

    Parameters
    ----------
    x0, y0 : float
        The min values of the ranges
    xd, yd : float
        The widths of the ranges
    """
    x_end = x0 + xd
    y_end = y0 + yd
    # Disjoint iff one range ends strictly before the other begins.
    return not (x_end < y0 or y_end < x0)
def gen_col_list(num_signals):
    """
    Given the number of signals, return the column names for the data.

    E.g. 3 signals returns the list: ['Time','Signal1','Signal2','Signal3']
    """
    signal_cols = ['Signal{}'.format(n) for n in range(1, num_signals + 1)]
    return ['Time'] + signal_cols
def abstract(func):
    """
    An abstract decorator: calling the wrapped function raises
    NotImplementedError.

    :param func: The function.
    :return: The wrapper function.
    """
    def wrapper(*args, **kwargs):
        raise NotImplementedError('{} has not been implemented'.format(func.__name__))

    func.__isabstractmethod__ = True
    return wrapper
def non_related_filter(questions_df, non_related_ids):
    """
    Split a questions dataframe between related and non-related discussions,
    based on an Ids list of non-related discussions.

    :param questions_df:
        > A pandas dataframe of stackoverflow questions containing posts Ids;
    :param non_related_ids:
        > List like object containing Ids of manually filtered non-related
        stack overflow discussions;
    :return (tuple):
        > Two dataframes, one with related discussions and another with
        non-related discussions
    """
    is_non_related = questions_df.Id.isin(non_related_ids)
    non_related = questions_df.loc[is_non_related].fillna(0.0)
    related = questions_df.loc[~is_non_related].fillna(0.0)
    return related, non_related
import math
def sin(x):
    """Return sin of x (x is in radians)"""
    result = math.sin(x)
    return result
def get_router_port(endpoint):
    """ Get the network device and port where the endpoint is connected.

    Args:
        endpoint (endpoint): endpoint

    A routerport is a dict with the following keys:
        router (string): name of the network device
        port (string): port on the router.
        vlan (string): VLAN

    Returns:
        dict: router, port and vlan
    """
    keys = ('router', 'port', 'vlan')
    return {key: endpoint[key] for key in keys}
import networkx
def createGraph(input_edge_list):
    """
    From list of edges create and return a graph.

    :param input_edge_list: path of an edge-list file
    :returns G: the largest connected component of the parsed graph
    """
    # Infer the delimiter from the second character of the first line.
    # NOTE(review): this assumes every node label is exactly one character
    # long -- confirm the edge-list format before relying on it.
    with open(input_edge_list, 'r') as f:
        l = f.readline()
        delimiter = l[1]
    print(f"Graph creation started: ")
    G = networkx.read_edgelist(input_edge_list, delimiter=delimiter)
    print(f"----- Original graph has {G.number_of_nodes()} nodes and {G.number_of_edges()} edges.")
    # only consider the largest connected component
    G = G.subgraph(max(networkx.connected_components(G), key=len)).copy()
    print(f"----- The largest component subgraph has {G.number_of_nodes()} nodes and {G.number_of_edges()} edges.\n")
    return G
import math
def cie76(c1, c2):
    """
    Color comparison using the CIE76 algorithm.

    Returns a float value where 0 is a perfect match and 100 is
    opposing colors. Note that the range can be larger than 100.

    http://zschuessler.github.io/DeltaE/learn/
    LAB Delta E - version CIE76
    https://en.wikipedia.org/wiki/Color_difference
    E* = 2.3 corresponds to a JND (just noticeable difference)
    """
    delta_l = c2[0] - c1[0]
    delta_a = c2[1] - c1[1]
    delta_b = c2[2] - c1[2]
    return math.sqrt(delta_l ** 2 + delta_a ** 2 + delta_b ** 2)
import random
def add_angle(r):
    """
    Pair every radius in *r* with a random angle to form polar coordinates.

    Returns a single (r, theta) tuple when *r* has exactly one element,
    otherwise a list of tuples.
    """
    # One random.random() call per radius, in input order.
    coords = [(radius, random.random() * 360) for radius in r]
    if len(coords) == 1:
        return coords[0]
    return coords
def get_tags_from_message(message):
    """
    Given a message string, extract hashtags and return a comma-separated list.

    :param message: a Hipchat message body
    """
    hashtags = {word.strip('#') for word in message.split() if word.startswith('#')}
    joined = ','.join(hashtags)
    return joined
def tf_read_img(tf, filename):
    """Loads a image file as float32 HxWx3 array; tested to work on png and jpg images.

    Args:
        tf: the TensorFlow module/handle (uses the TF1-style API, e.g.
            ``tf.read_file`` -- confirm against the TF version in use).
        filename: path of the image file to load.

    Returns:
        A float32 image tensor with 3 channels, values scaled to [0, 1].
    """
    string = tf.read_file(filename)
    image = tf.image.decode_image(string, channels=3)
    image = tf.cast(image, tf.float32)
    # Normalize 8-bit pixel values to the unit interval.
    image /= 255
    return image
def _parseLinks(response, rel):
"""
Parses an HTTP response's ``Link`` headers of a given relation, according
to the Corelight API specification.
response (requests.Response): The response to parse the ``Link`` headers
out of.
rel (str): The link relation type to parse; all other relations are ignored.
Returns: A list of 2-tuples ``(string, string)`` where the 1st string is
the URL parsed out of a ``Link`` header; and the 2nd string is the optional
title associated with the link or None if none.
"""
links = response.headers.get("Link", None)
if not links:
return []
result = []
for l in links.split(","):
m = l.split(">", 1)
url = m[0].strip()[1:]
params = {}
for p in m[1].split(";"):
if not p:
continue
(k, v) = p.split("=")
params[k.strip().lower()] = v.strip()
if params.get("rel", None) != rel:
continue
title = params.get("title", None)
result.append((url, title))
return result | 73a4f2a7e981b335aa511e5e311ef7290a7695e3 | 701,970 |
def find_min_max(data):
    """Solution to exercise C-4.9.

    Recursively find the minimum and maximum values in a sequence without
    using any loops.
    """
    total = len(data)
    low = data[0]
    high = data[0]

    def scan(pos):
        nonlocal low, high
        if pos == total:
            return low, high  # base case: whole sequence visited
        value = data[pos]
        if value > high:
            high = value
        elif value < low:
            low = value
        return scan(pos + 1)

    return scan(1)
from typing import Dict
def dict_squares(n: int) -> Dict[int, int]:
    """Map each integer from 0 to n (inclusive) to its square.

    doctests:
    >>> dict_squares(2)
    {0: 0, 1: 1, 2: 4}
    >>> dict_squares(5)
    {0: 0, 1: 1, 2: 4, 3: 9, 4: 16, 5: 25}
    """
    squares = {}
    for value in range(n + 1):
        squares[value] = value * value
    return squares
from typing import Any
import typing
def hint_is_specialized(hint: Any, target: Any) -> bool:
    """Checks if a type hint is a specialized version of target.

    E.g., hint_is_specialized(ClassVar[int], ClassVar) is True.

    isinstance would invoke type-checking, which this method sidesteps.
    Behavior is undefined for simple type hints that don't take a type
    argument, like Any or a bare type.
    """
    origin = typing.get_origin(hint)
    return origin is target
def module_enclosing_func(offset):
    """ Test function to see if module-level enclosures are detected """
    def module_closure_func(self):
        """
        Actual closure function, should be reported as:
        putil.tests.my_module.module_enclosing_func.module_closure_func
        """
        # Registers a fixture exception before returning; relies on
        # self._exobj and self._value1 being provided by the host test
        # class -- confirm against the test harness.
        self._exobj.add_exception(
            exname='illegal_value',
            extype=TypeError,
            exmsg='Argument `value` is not valid'
        )
        return self._value1+offset
    return module_closure_func
def _front_left_tire_pressure_supported(data):
"""Determine if front left tire pressure is supported."""
return data["status"]["tirePressure"]["frontLeftTirePressurePsi"] is not None | b59ab6f4a9b3d0801c1c5c8798e7da2fab0b580d | 701,993 |
def count_missing_doc_types(articles):
    """
    :param articles A PyMongo collection of articles
    :return: int: Number of articles without a 'doc_type' property or having
        it equal to the empty string ('')
    """
    missing_filter = {
        "$or": [{"doc_type": {"$exists": False}}, {"doc_type": ""}]
    }
    return articles.count_documents(missing_filter)
def module_loaded(module):
    """
    Checks if the specified kernel-module has been loaded.

    :param module: Name of the module to check
    :return: True if the module is loaded, False if not.
    """
    # Use a context manager so the file handle is closed deterministically;
    # the original open() was never closed and leaked until GC.
    with open("/proc/modules") as modules:
        return any(line.startswith(module) for line in modules)
def IsMonophyleticForTaxa(tree,
                          taxa,
                          support=None):
    """check if a tree is monophyletic for a list of taxa.

    Arguments
    ---------
    tree : :class:`Tree`
        Tree to analyse
    taxa : list
        List of taxa
    support : float
        Minimum bootstrap support required at the subtree root; when None,
        support values are ignored.

    Returns
    -------
    bool
    """
    # Re-root so the taxa of interest can form a clade.
    tree.root_with_outgroup(taxa)
    if support:
        # is_monophyletic returns -1 when the taxa are not monophyletic.
        n = tree.is_monophyletic(taxa)
        if n == -1:
            return False
        # NOTE(review): reads the support of the first child of the root --
        # confirm this matches the Tree implementation's conventions.
        return tree.node(tree.node(tree.root).succ[0]).data.support >= support
    else:
        return tree.is_monophyletic(taxa) != -1
def format_duration(dur: float) -> str:
    """Formats duration (from minutes) into a readable format"""
    minutes = float(dur)
    if minutes >= 1.0:
        return "{} min".format(int(minutes))
    seconds = int(round(minutes * 60))
    return "{} sec".format(seconds)
import struct
def unpack(structure, data):
    """
    Unpack a little-endian hexlified binary string into a tuple of values.
    """
    raw = bytes.fromhex(data)
    return struct.unpack('<' + structure, raw)
def get_recall(indices, targets):
    """ Calculates the recall score for the given predictions and targets.

    Args:
        indices (Bxk): torch.LongTensor. top-k indices predicted by the model.
        targets (B): torch.LongTensor. actual target indices.

    Returns:
        recall (float): the recall score
    """
    targets = targets.view(-1, 1).expand_as(indices)  # (Bxk)
    # Compute the hit positions once; the original called nonzero() twice
    # on the same comparison tensor.
    hits = (targets == indices).nonzero()
    if len(hits) == 0:
        return 0
    n_hits = hits[:, :-1].size(0)
    recall = n_hits / targets.size(0)
    return recall
def get_collection_no(row):
    """Get the collection number from an expedition row.

    Prefers the plain collector number; otherwise uses the verbatim value
    when the numeric-only value is shorter than 2 characters.
    """
    if row.get('collector_number'):
        return row.collector_number
    numeric = row.get('collector_number_numeric_only', '')
    verbatim = row.get('collector_number_verbatim', '')
    if verbatim and len(numeric) < 2:
        return row.collector_number_verbatim
    return row.collector_number_numeric_only
import configparser
from typing import Union
def _prompt_for_option_name (ARG_config_object: configparser.ConfigParser, ARG_section: str) -> Union[str, None]:
    """Prompts the user to enter a valid option name. Checks that option name exists.

    Parameters
    ----------
    ARG_config_object : configparser.ConfigParser
        The settings, which have been read from `SETTINGS_FILE` by configparser and stored in this configparser.ConfigParser object.
    ARG_section : str
        A valid section name in the config object (see above).

    Returns
    -------
    str or None
        If a string, it's an option name that exists in the given section and config object. If None, the user wishes to exit whatever dialog called this function.
    """
    print ()
    print (f"SELECTED SECTION: {ARG_section}")
    print ("Type the option name of the setting you would like to change, or")
    _input = input ("leave blank to exit: ")
    _input = _input.strip()
    if _input == "":
        # Blank input is the caller's signal to exit the dialog.
        return None
    # configparser stores option names lowercased by default.
    option_name = _input.lower()
    if ARG_config_object.has_option (ARG_section, option_name) is False:
        print (f"Sorry, {option_name} is not a valid option name.")
        # Re-prompt recursively until a valid option name (or blank) is given.
        recursive_result = _prompt_for_option_name(ARG_config_object , ARG_section)
        return recursive_result
    else:
        return option_name
def find_direct_conflicts(pull_ops, unversioned_ops):
    """
    Detect direct conflicts: pairs of one pulled and one local
    unversioned operation (updates or deletes only) that refer to the
    same tracked object. Relies on primary keys being unique through
    time.
    """
    conflicts = []
    for remote_op in pull_ops:
        if remote_op.command not in ('u', 'd'):
            continue
        for local_op in unversioned_ops:
            if local_op.command not in ('u', 'd'):
                continue
            same_row = remote_op.row_id == local_op.row_id
            same_type = remote_op.content_type_id == local_op.content_type_id
            if same_row and same_type:
                conflicts.append((remote_op, local_op))
    return conflicts
def init(input_mgr, user_data, logger):
    """Initialize the example source tool.

    Reads the "Value" setting from the workflow configuration, stores
    it on ``user_data`` for later use, logs it, and warns when it
    exceeds 0.5. Always returns True.
    """
    selected = float(input_mgr.workflow_config["Value"])
    # Keep the selected value around for the rest of the tool's lifecycle.
    user_data.val = selected
    logger.display_info_msg(f"The value selected is {selected}")
    if selected > 0.5:
        logger.display_warn_msg(f"The value selected is greater than 0.5")
    return True
import copy
def combine(to_merge, extend_by):
    """Recursively merge nested dictionaries.

    Returns a deep copy of ``to_merge`` updated with the entries of
    ``extend_by``. When both sides hold a dict for the same key, the
    dicts are merged recursively; otherwise the value from
    ``extend_by`` wins. Neither input is mutated (though values taken
    from ``extend_by`` are inserted by reference, not copied).

    Args:
        to_merge: base dictionary (deep-copied, never modified).
        extend_by: dictionary whose entries override/extend the base.

    Returns:
        dict: the merged result.
    """
    def _combine(base, extra):
        for key, value in extra.items():
            # Recurse only when BOTH sides are dicts. The previous code
            # recursed whenever base[key] was a dict and crashed with
            # AttributeError when the incoming value was a scalar.
            if (key in base and isinstance(base[key], dict)
                    and isinstance(value, dict)):
                _combine(base[key], value)
            else:
                base[key] = value

    merged = copy.deepcopy(to_merge)
    _combine(merged, extend_by)
    return merged
def chr22XY(c):
    """Normalize a chromosome identifier to the ``chr``-prefixed form.

    Accepts ints or strings, with or without a leading ``chr``;
    23 and 24 are mapped to X and Y respectively.

    Args:
        c (str or int): A chromosome.
    Returns:
        str: The reformatted chromosome.
    Examples:
        >>> chr22XY('1')
        'chr1'
        >>> chr22XY(1)
        'chr1'
        >>> chr22XY('chr1')
        'chr1'
        >>> chr22XY(23)
        'chrX'
        >>> chr22XY(24)
        'chrY'
        >>> chr22XY("X")
        'chrX'
        >>> chr22XY("23")
        'chrX'
        >>> chr22XY("M")
        'chrM'
    """
    name = str(c)
    if name.startswith('chr'):
        name = name[3:]
    # Numeric aliases for the sex chromosomes.
    name = {'23': 'X', '24': 'Y'}.get(name, name)
    return 'chr' + name
def get_tourn_golfer_id(tourn_golfers_list, tourn_id, golfer_id):
    """Return the tourn_golfer_id of the first entry matching both
    ``golfer_id`` and ``tourn_id``, or 0 when no entry matches.
    """
    match = next(
        (tg for tg in tourn_golfers_list
         if tg.get_golfer_id() == golfer_id
         and tg.get_tourn_id() == tourn_id),
        None)
    # 0 is the sentinel for "not found", matching the original contract.
    return match.get_tourn_golfer_id() if match is not None else 0
import re
def verify_raw_google_hash_header(google_hash: str) -> bool:
    """Verify the format of the raw value of the "x-goog-hash" header.

    The expected format is ``crc32c=<base64>,md5=<base64>``.

    Note: For now this method is used for tests only.

    :param str google_hash: the raw value of the "x-goog-hash" header
    :rtype: bool
    """
    # fullmatch (rather than match) anchors at both ends, so trailing
    # garbage after the md5 value is rejected; the original only
    # anchored at the start and accepted any suffix.
    return bool(re.fullmatch(r'crc32c=[A-Za-z0-9+/=]+,md5=[A-Za-z0-9+/=]+', google_hash))
import hashlib
def get_SHA1(variant_rec):
    """Return the uppercase hex SHA1 digest of the seq, study, contig,
    start, ref and alt fields of ``variant_rec``, joined by underscores.
    """
    fields = ('seq', 'study', 'contig', 'start', 'ref', 'alt')
    digest_input = '_'.join(str(variant_rec[field]) for field in fields)
    return hashlib.sha1(digest_input.encode()).hexdigest().upper()
def appendToFile(fileName: str, content: str):
    """Append ``content`` to the end of the file at ``fileName``."""
    handle = open(fileName, "a")
    try:
        handle.write(content)
    finally:
        # Close explicitly so the data is flushed even if write() raises.
        handle.close()
    return None
import re
def _apply_regex(regex, full_version):
"""
Applies a regular expression to the given full_version and tries to capture
a group
:param regex: the regular expression to apply
:param full_version: the string that the regex will apply
:return: None if the regex doesn't match or the result of the group
"""
match_object = re.match(regex, full_version)
if match_object and len(match_object.groups()) == 1:
subversion = match_object.group(1)
else:
subversion = None
return subversion | 0a053fd716844f4ec1ad166f414e4d1b931434ec | 702,035 |
def indices(a, func):
    """
    Get indices of elements in an array which satisfies func
    >>> indices([1, 2, 3, 4], lambda x: x>2)
    [2, 3]
    >>> indices([1, 2, 3, 4], lambda x: x==2.5)
    []
    >>> indices([1, 2, 3, 4], lambda x: x>1 and x<=3)
    [1, 2]
    >>> indices([1, 2, 3, 4], lambda x: x in [2, 4])
    [1, 3]
    >>> indices([1,2,3,1,2,3,1,2,3], lambda x: x > 2)
    [2, 5, 8]
    """
    matching = []
    for position, value in enumerate(a):
        if func(value):
            matching.append(position)
    return matching
import re
import html
def escape_text(txt):
    """
    Escape text, replacing leading spaces to non-breaking ones and newlines to <br> tag
    """
    lines = []
    for line in txt.splitlines():
        # Match any run of leading whitespace on this line.
        lead_spaces = re.match(r'^\s+', line)
        if lead_spaces: # Replace leading spaces with non-breaking ones
            # NOTE(review): per the docstring, the quoted character below
            # should be a non-breaking space (U+00A0), not a plain space --
            # confirm it was not silently normalized by an editor.
            lines.append(lead_spaces.end() * ' ' + html.escape(line[lead_spaces.end():], quote=False))
        else:
            # No leading whitespace: just HTML-escape the line
            # (quote=False leaves " and ' untouched).
            lines.append(html.escape(line, quote=False))
    # Join with <br> so line breaks survive when rendered as HTML.
    return '<br>'.join(lines)
import json
def _clean_output_json(output_json: str) -> str:
"""Make JSON output deterministic and nicer to read."""
try:
output = json.loads(output_json)
except json.JSONDecodeError:
raise ValueError(
f"Instead of JSON, output was:\n--- output start ---\n{output_json}\n--- output end ---"
)
return json.dumps(output, indent=2, sort_keys=True) | 0952ed8f8cc34ca2c18aa3d09ca0c81607066332 | 702,040 |
import socket
def check_connection(server, port):
    """Return True when a TCP connection to (server, port) can be
    established within 5 seconds, False otherwise."""
    try:
        # The socket context manager closes the connection on exit,
        # mirroring the explicit close() of the original.
        with socket.create_connection((server, port), timeout=5):
            pass
    except socket.error:
        return False
    return True
def empirical_cdf(values, v):
    """
    Returns the proportion of values in ``values`` <= ``v``.

    Raises:
        ValueError: if ``values`` is empty (the CDF is undefined).
    """
    if not values:
        raise ValueError("empirical_cdf of an empty sequence is undefined")
    # Use <= to match the documented contract; the previous code used a
    # strict < comparison and under-counted values equal to ``v``.
    count = sum(1 for v0 in values if v0 <= v)
    return count / len(values)
def _calculate_compliance(results):
"""
Calculate compliance numbers given the results of audits
"""
success = len(results.get('Success', []))
failure = len(results.get('Failure', []))
control = len(results.get('Controlled', []))
total_audits = success + failure + control
if total_audits:
compliance = float(success + control) / total_audits
compliance = int(compliance * 100)
compliance = '{0}%'.format(compliance)
return compliance
return None | d0855cd88a0ec88a1b9de2c1ba0372a854f2a9c6 | 702,045 |
def lame(E=None, v=None, u=None, K=None, Vp=None, Vs=None, rho=None):
    """
    Compute the first Lame parameter (lambda) of a material given other moduli.

    :param E: Young's modulus (combine with v, u, or K)
    :param v: Poisson's ratio (combine with E, u, or K)
    :param u: shear modulus (combine with E, v, or K)
    :param K: Bulk modulus (combine with E, v, or u)
    :param Vp: Compressional velocity (combine with Vs and rho)
    :param Vs: Shear velocity (combine with Vp and rho)
    :param rho: Density (combine with Vp and Vs)
    :return: the first Lame parameter, or None if the supplied
        combination of inputs is insufficient.
    """
    # Test against None rather than truthiness so legitimate zero values
    # (e.g. v=0, Vs=0) select the correct formula instead of falling
    # through to another branch or returning None.
    if E is not None and v is not None:
        L = E*v/((1+v)*(1-2*v))
    elif E is not None and u is not None:
        L = u*(E - 2*u)/(3*u - E)
    elif E is not None and K is not None:
        L = 3*K*(3*K-E)/(9*K-E)
    elif v is not None and u is not None:
        L = 2*u*v/(1-2*v)
    elif v is not None and K is not None:
        L = 3*K*v/(1+v)
    elif u is not None and K is not None:
        L = (3*K-2*u)/3
    elif Vp is not None and Vs is not None and rho is not None:
        L = rho*(Vp**2 - 2*Vs**2)
    else:
        L = None
    return L
def total_points(min_x, max_x, points_per_mz):
    """
    Calculate the number of points for the regular grid based on the full width at half maximum.
    :param min_x: the lowest m/z value
    :param max_x: the highest m/z value
    :param points_per_mz: number of points per fwhm
    :return: total number of points
    :rtype: int
    :raises ValueError: when min_x > max_x or any input is <= 0
    """
    span = max_x - min_x
    if span < 0:
        raise ValueError("min_x > max_x")
    if min_x <= 0 or max_x <= 0 or points_per_mz <= 0:
        raise ValueError("all inputs must be greater than 0")
    # Truncate, then add one for the inclusive endpoint.
    return int(span * points_per_mz) + 1
def time_text_to_float(time_string):
    """Convert a transcript timestamp of the form 'HH:MM:SS(.fff)'
    into total seconds as a float."""
    h, m, s = time_string.split(':')
    return 3600 * int(h) + 60 * int(m) + float(s)
from typing import Tuple
from typing import List
from datetime import datetime
def read_tides(path: str) -> Tuple[List, List]:
    """Read a CO-OPS tide data CSV file and return
    a list of times and elevations.

    The first line is treated as a header and skipped. Column 0 holds
    the date (YYYY/MM/DD), column 1 the time (HH:MM) and column 4 the
    elevation.

    Args:
        path: path to the CSV file.

    Returns:
        (times, elevations): parsed datetimes and float elevations.
    """
    # Use a context manager so the handle is closed deterministically;
    # the original iterated over a bare open() and leaked the handle.
    with open(path) as handle:
        data = [line.strip() for line in handle]
    time = []
    elevation = []
    for line in data[1:]:
        fields = line.replace('"', '').split(',')
        time.append(datetime.strptime(fields[0] + fields[1], '%Y/%m/%d%H:%M'))
        elevation.append(float(fields[4]))
    return time, elevation
from typing import Any
import json
def read_json_file(filename: str) -> Any:
    """Deserialize and return the JSON document stored in ``filename``.

    Parameters
    ----------
    filename : str
        The json file

    Returns
    -------
    A json object

    Example
    -------
    >>> import allyoucanuse as aycu
    >>> content = aycu.read_json_file("tickets.json")
    """
    with open(filename) as handle:
        raw = handle.read()
    return json.loads(raw)
def combine_loss_components(critic_loss_val, actor_loss_val, entropy_val,
                            actor_loss_weight, entropy_bonus):
    """Return the combined AWR loss: critic loss plus the weighted
    actor loss, minus the entropy bonus term."""
    weighted_actor = actor_loss_val * actor_loss_weight
    entropy_term = entropy_val * entropy_bonus
    return critic_loss_val + weighted_actor - entropy_term
def waitfor(css_selector, text=None, classes=None):
    """
    Decorator recording elements (selected by a CSS-style selector) to
    explicitly wait for before taking a screenshot. If text is set, the
    element must contain that text; if classes is set, the element must
    carry all of them. Each application appends one entry to the
    decorated method's ``__waitfors`` list.
    """
    def decorator(method):
        waitfors = getattr(method, '__waitfors', None)
        if not isinstance(waitfors, list):
            waitfors = []
            setattr(method, '__waitfors', waitfors)
        waitfors.append({
            'css_selector': css_selector,
            'text': text,
            'classes': classes
        })
        return method
    return decorator
def transpose(table):
    """
    Returns a copy of table with rows and columns swapped

    Example:
        1 2     1 3 5
        3 4  => 2 4 6
        5 6

    Parameter table: the table to transpose
    Precondition: table is a rectangular 2d List of numbers
    """
    # zip(*table) pairs up the m-th element of every row, i.e. the
    # m-th column; materialize each column as a list.
    return [list(column) for column in zip(*table)]
def memsizeformat(size):
    """Returns memory size in human readable (rounded) form.

    Args:
        size: memory size; presumably expressed in KB, since one
            division by 1024 yields MB -- confirm with callers.

    Returns:
        str: whole-number size with a GB, MB or KB suffix.
    """
    # Floor division keeps the output rounded as documented; under
    # Python 3 the original true division produced floats ("2.0 MB").
    if size > 1048576:  # 1024**2
        return "{0} GB".format(size // 1048576)
    elif size > 1024:
        return "{0} MB".format(size // 1024)
    else:
        return "{0} KB".format(size)
def flip_dataframe(df, new_colname='index'):
    """Flips table such that first row becomes columns

    Args:
        df (DataFrame): Data frame to be flipped.
        new_colname (str): Name of new column. Defaults to 'index'.

    Returns:
        DataFrame: flipped data frame.
    """
    # The values of the first column become the new column headers,
    # preceded by a column holding the original headers.
    new_columns = [new_colname] + list(df.iloc[:, 0])
    flipped = df.T.reset_index()
    flipped.columns = new_columns
    # Drop the first transposed row: it duplicates the new header.
    return flipped.iloc[1:, :]
def ca65_bytearray(s):
    """Convert a byteslike into ca65 constant byte statements"""
    # Emit one ".byt" directive per 16 bytes, each value rendered as a
    # right-aligned 3-character decimal so columns line up in the listing.
    s = ['  .byt ' + ','.join("%3d" % ch for ch in s[i:i + 16])
        for i in range(0, len(s), 16)]
    # Join the directives into a single newline-separated string.
    return '\n'.join(s)
Subsets and Splits
No saved queries yet
Save your SQL queries to embed, download, and access them later. Queries will appear here once saved.