content
stringlengths 39
14.9k
| sha1
stringlengths 40
40
| id
int64 0
710k
|
---|---|---|
def closestMedioidI(active_site, medioids, distD):
    """Return the index in ``medioids`` of the medioid nearest to ``active_site``.

    input: active_site, an ActiveSite instance
           medioids, a list of ActiveSite instances
           distD, a dictionary mapping frozenset pairs to distances
    output: the index of the ActiveSite closest to active_site in medioids
    """
    best_index = None
    best_dist = float('Inf')
    for index, medioid in enumerate(medioids):
        dist = distD[frozenset([active_site, medioid])]
        if dist < best_dist:
            best_dist, best_index = dist, index
    return best_index
def _to_native_string(string, encoding='ascii'):
"""Given a string object, regardless of type, returns a representation of
that string in the native string type, encoding and decoding where
necessary. This assumes ASCII unless told otherwise.
"""
if isinstance(string, str):
out = string
else:
out = string.decode(encoding)
return out | b50fd0fc62b2cfc024c847b98e1f85b4b67d07e3 | 708,311 |
def login(client, password="pass"):
    """Helper function to log into our app.

    Parameters
    ----------
    client : test client object
        The flask test client used to send the request.
    password : str
        Dummy password for logging into the app.

    Return
    -------
    post request object
        Result of POSTing the login form fields to the /login route.
    """
    form_data = {'pass_field': password, 'remember_me': True}
    return client.post('/login', data=form_data, follow_redirects=True)
def mock_datasource_http_oauth2(mock_datasource):
    """Configure *mock_datasource* with HTTP OAuth2 credentials and return it."""
    creds = b"client_id: FOO\nclient_secret: oldisfjowe84uwosdijf"
    mock_datasource.credentials = creds
    mock_datasource.location = "http://foo.com"
    return mock_datasource
def generateFromSitePaymentObject(signature: str, account_data: dict, data: dict) -> dict:
    """Build the "from site" CHARGE request payload.

    Args:
        signature (str): signature hash string
        account_data (dict): merchant_account: str
                             merchant_domain: str
        data (dict): order + personal data to create the charge:
            orderReference, amount, currency, card, expMonth, expYear,
            cardCvv, cardHolder, productName (list), productPrice (list),
            productCount (list), clientFirstName, clientLastName,
            clientCountry, clientEmail, clientPhone

    Returns:
        dict: request body for the charge endpoint.
    """
    # orderReference doubles as the order date (a timestamp upstream).
    order_reference = str(data['orderReference'])
    payload = {
        "transactionType": "CHARGE",
        "merchantAccount": account_data['merchant_account'],
        "merchantAuthType": "SimpleSignature",
        "merchantDomainName": account_data['merchant_domain'],
        "merchantTransactionType": "AUTH",
        "merchantTransactionSecureType": "NON3DS",
        "merchantSignature": signature,
        "apiVersion": 1,
        "orderReference": order_reference,
        "orderDate": order_reference,
        "amount": data["amount"],
        "currency": data['currency'],
        "card": data['card'],
        "expMonth": data['expMonth'],
        "expYear": data['expYear'],
        "cardCvv": data['cardCvv'],
        "cardHolder": data['cardHolder'],
        "productName": [str(item) for item in data['productName']],
        "productPrice": [float(item) for item in data['productPrice']],
        "productCount": [int(item) for item in data['productCount']],
        "clientFirstName": data['clientFirstName'],
        "clientLastName": data['clientLastName'],
        "clientCountry": data['clientCountry'],
        "clientEmail": data['clientEmail'],
        "clientPhone": data['clientPhone'],
    }
    return payload
def convert_millis(track_dur_lst):
    """Convert a list of millisecond durations to 'HH:MM:SS' strings.

    Parameters
    ----------
    track_dur_lst : iterable of int or str
        Track durations in milliseconds.

    Returns
    -------
    list of str
        Durations formatted as zero-padded 'HH:MM:SS'.
    """
    converted_track_times = []
    for track_dur in track_dur_lst:
        total_seconds = int(track_dur) // 1000
        seconds = total_seconds % 60
        # Minutes must be taken modulo 60; the original leaked the full
        # minute count (including hours) into the minutes field, producing
        # e.g. "01:61:00" for 1h01m.
        minutes = (total_seconds // 60) % 60
        hours = total_seconds // 3600
        converted_track_times.append('%02d:%02d:%02d' % (hours, minutes, seconds))
    return converted_track_times
def sync_xlims(*axes):
    """Synchronize the x-axis data limits for multiple axes.

    Applies the minimum lower limit and maximum upper limit found across
    all given axes to every axis.

    Parameters
    ----------
    *axes : axis objects
        Matplotlib axis objects to format.

    Returns
    -------
    out : xmin, xmax
        The computed bounds.
    """
    limits = [ax.get_xlim() for ax in axes]
    lower = min(lim[0] for lim in limits)
    upper = max(lim[1] for lim in limits)
    for ax in axes:
        ax.set_xlim(lower, upper)
    return lower, upper
def total_length(neurite):
    """Neurite length: the sum of section lengths over all iterated sections.

    For a morphology this is the sum of all neurite lengths.
    """
    section_lengths = (section.length for section in neurite.iter_sections())
    return sum(section_lengths)
def init_columns_entries(variables):
    """Return fresh ``(columns, entries)`` lists derived from *variables*.

    Normalizes without mutating the caller's objects: a lone string column
    becomes a one-element list, a lone dict entry becomes a one-element
    list, and missing values become empty lists.
    """
    raw_columns = variables.get('columns')
    if raw_columns is None:
        columns = []  # Relevant columns in proper order
    elif isinstance(raw_columns, str):
        columns = [raw_columns]
    else:
        columns = list(raw_columns)

    raw_entries = variables.get('entries')
    if raw_entries is None:
        entries = []  # Entries of dict with relevant columns
    elif isinstance(raw_entries, dict):
        entries = [raw_entries]
    else:
        entries = list(raw_entries)
    return columns, entries
import base64
def data_uri(content_type, data):
    """Return *data* as a ``data:`` URI with URL-safe base64 encoding.

    Args:
        content_type: MIME type for the URI, e.g. ``"image/png"``.
        data: payload as bytes (a str is encoded as UTF-8 first).

    Returns:
        str: the ``data:<type>;base64,<payload>`` URI.
    """
    if isinstance(data, str):
        data = data.encode('utf-8')
    # b64encode returns bytes on Python 3; decode so "%s" does not embed
    # the b'...' repr in the URI.
    encoded = base64.urlsafe_b64encode(data).decode('ascii')
    return "data:%s;base64,%s" % (content_type, encoded)
def next_line(ionex_file):
    """Return the next non-blank line from *ionex_file*.

    A completely empty string (the typical EOF marker) is returned
    as-is so callers can detect end of file.
    """
    while True:
        line = ionex_file.readline()
        # '' means EOF; propagate it so the caller can stop reading.
        if line == '' or line.strip():
            return line
def response_GET(client, url):
    """Fixture helper: return the client's response to a GET on *url*."""
    response = client.get(url)
    return response
def _get_realm(response):
"""Return authentication realm requested by server for 'Basic' type or None
:param response: requests.response
:type response: requests.Response
:returns: realm
:rtype: str | None
"""
if 'www-authenticate' in response.headers:
auths = response.headers['www-authenticate'].split(',')
basic_realm = next((auth_type for auth_type in auths
if auth_type.rstrip().lower().startswith("basic")),
None)
if basic_realm:
realm = basic_realm.split('=')[-1].strip(' \'\"').lower()
return realm
else:
return None
else:
return None | 346b3278eb52b565f747c952493c15820eece729 | 708,344 |
def get_in(obj, lookup, default=None):
    """Walk *obj* via ``__getitem__`` for each key in *lookup*.

    Returns the value at the end of the path, or *default* as soon as
    any step fails with KeyError, IndexError or TypeError.
    """
    current = obj
    for key in lookup:
        try:  # pragma: no cover
            current = current[key]
        except (KeyError, IndexError, TypeError):  # pragma: no cover
            return default
    return current
def system(_printer, ast):
    """Prints the instance system initialization."""
    # Processes within a block are comma-separated; blocks are ordered by '<'.
    blocks = [', '.join(block) for block in ast["processNames"]]
    return 'system {};'.format(' < '.join(blocks))
def blend_multiply(cb: float, cs: float) -> float:
    """Blend mode 'multiply': the product of backdrop and source values."""
    product = cb * cs
    return product
def my_func_1(x, y):
    """Return x raised to the power y.

    Parameters:
    x -- the base number
    y -- the exponent

    (number, number) -> number

    >>> my_func_1(2, 2)
    4
    """
    return pow(x, y)
def matrix_mult(a, b):
    """Return the matrix product of *a* and *b*.

    Parameters
    ----------
    a, b : matrices (list of list of numbers)
        Shapes must be compatible: columns of *a* == rows of *b*.

    Returns
    -------
    new_array : matrix
        The matrix product of the inputs.
    """
    rows, inner, cols = len(a), len(a[0]), len(b[0])
    product = [[0] * cols for _ in range(rows)]
    for r in range(rows):
        for c in range(cols):
            acc = 0
            for k in range(inner):
                acc += a[r][k] * b[k][c]
            product[r][c] = acc
    return product
def clear_bit(val, offs):
    """Return *val* with the bit at offset *offs* cleared (set to 0)."""
    mask = 1 << offs
    return val & ~mask
def _ExtractCLPath(output_of_where):
"""Gets the path to cl.exe based on the output of calling the environment
setup batch file, followed by the equivalent of `where`."""
# Take the first line, as that's the first found in the PATH.
for line in output_of_where.strip().splitlines():
if line.startswith('LOC:'):
return line[len('LOC:'):].strip() | 6a0c0d4aa74b4e84de69de023e2721edd95c36bd | 708,364 |
def get_edges_out_for_vertex(edges: list, vertex: int) -> list:
    """Get a sublist of edges that have the specified vertex as first element

    :param edges: edges of the graph
    :param vertex: vertex of which we want to find the corresponding edges
    :return: selected edges
    """
    selected = []
    for edge in edges:
        if edge[0] == vertex:
            selected.append(edge)
    return selected
def build_job_spec_name(file_name, version="develop"):
    """Build a job spec name from a dotted file name and a version.

    :param file_name: dotted name; only the last dot-separated part is used
    :param version: version tag appended after the colon
    :return: str, ex. job-hello_world:develop
    """
    base = file_name.split('.')[-1]
    return 'job-{}:{}'.format(base, version)
def single_from(iterable):
    """Check that an iterable contains one unique value, and return it.

    Raises ValueError when the iterable holds zero or several distinct values.
    """
    distinct = set(iterable)
    if len(distinct) != 1:
        raise ValueError('multiple unique values found')
    return distinct.pop()
def api_2_gamma_oil(value):
    """Convert API gravity to oil specific gravity (relative to water).

    The API gravity definition is API = 141.5 / SG - 131.5, hence
    SG = 141.5 / (API + 131.5). The previous implementation returned the
    reciprocal (value + 131.5) / 141.5, which is the inverse of the
    documented conversion.

    :param value: density in API (American Petroleum Institute gravity)
    :return: oil relative density by water (specific gravity)
    """
    return 141.5 / (value + 131.5)
def remove_empty(s):
    """\
    Remove empty strings from a list, in place, and return it.

    >>> a = ['a', 2, '', 'b', '']
    >>> remove_empty(a)
    [{u}'a', 2, {u}'b']
    """
    try:
        while True:
            s.remove('')
    except ValueError:
        # No more empty strings left to remove.
        pass
    return s
import hashlib
def md5_hash_file(fh):
    """Return the hex md5 digest of the given file-object's contents."""
    digest = hashlib.md5()
    # Read in 8 KiB chunks to avoid loading the whole file into memory.
    chunk = fh.read(8192)
    while chunk:
        digest.update(chunk)
        chunk = fh.read(8192)
    return digest.hexdigest()
def find_first_img_dim(import_gen):
    """Return (height, width) of the first image in the provided data set.

    Intentionally returns on the first iteration of the loop.

    :param import_gen: iterable yielding (images, _, _) batches, e.g. a
        PyTorch DataLoader utilizing ImageFolderWithPaths for its dataset
    :return: dimensions of the first image
    """
    for batch, _, _ in import_gen:
        first = batch[0]
        return first.shape[-2], first.shape[-1]
from typing import List
def metadata_partitioner(rx_txt: str) -> List[str]:
    """Extract Relax program and metadata section.

    Parameters
    ----------
    rx_txt : str
        The input relax text.

    Returns
    -------
    output : List[str]
        The result list of partitioned text; the first element is the
        relax program and the second is the metadata section.
    """
    depth = 0
    meta_start = 0
    meta_end = 0
    # Scan for the first balanced {...} region: that is the metadata.
    for pos, char in enumerate(rx_txt):
        if char == "{":
            if meta_start == 0:
                meta_start = pos
            depth += 1
        elif char == "}":
            depth -= 1
            if depth == 0:
                meta_end = pos + 1
                break
    if meta_end == 0:
        raise ValueError("The metadata section was not found.")
    metadata = rx_txt[meta_start:meta_end]
    # Program text follows the metadata; the trailing character is dropped.
    rx_program = rx_txt[meta_end:-1]
    return [rx_program, metadata]
def fin(activity):
    """Return the end time of the activity."""
    end_time = activity.finish
    return end_time
def solve(in_array):
    """Mirror *in_array* into a double-size grid whose quadrants are flips.

    Similar to 46442a0e, but the new quadrants are flips of the original
    array rather than rotations.

    :param in_array: input array (square 2D list)
    :return: expected output array
    """
    size = len(in_array[0])      # input array edge length
    last = 2 * size - 1          # index of the mirrored position
    out = [[-1] * (2 * size) for _ in range(2 * size)]
    for row in range(len(in_array)):
        for col in range(len(in_array[0])):
            cell = in_array[row][col]
            # Top-left keeps the original; other 3 quadrants are flips.
            out[row][col] = cell
            out[row][last - col] = cell
            out[last - row][col] = cell
            out[last - row][last - col] = cell
    return out
def extract(d, keys):
    """
    Extract a key from a dict.

    :param d: The dict.
    :param keys: A list of keys, in order of priority.
    :return: The value of the most important key that is present and
        truthy, else None.
    """
    if not d:
        return None
    for key in keys:
        value = d.get(key)
        if value:
            return value
    return None
from pathlib import Path
def get_output_filename(output_folder: str, repository_type: str,
                        repository_name: str, filename: str) -> Path:
    """Returns the output filename for the file fetched from a repository.

    Only the basename of the repository and of the file are used, nested
    under the lower-cased repository type inside *output_folder*.
    """
    repo_dir = Path(repository_name).name
    file_part = Path(filename).name
    return Path(output_folder) / repository_type.lower() / repo_dir / file_part
def b_2_d(x):
    """
    Convert a little-endian byte/bit list to decimal.

    :param x: list where element i is weighted by 2**i
    :return: decimal
    """
    return sum(value * 2 ** position for position, value in enumerate(x))
import pathlib
import stat
def check_file(file_name):
    """
    Test whether a file exists and is writable, or can be created.

    Args:
        file_name (str): the file name

    Returns:
        (pathlib.Path): the path, or None if problems
    """
    if not file_name:
        return None
    path = pathlib.Path(file_name)
    # If the file exists, test writability with APPEND mode: the original
    # used 'w', which truncated (destroyed) the existing contents just to
    # probe permissions.
    if path.exists() and path.is_file():
        try:
            with open(path, 'a'):
                pass
        except PermissionError:
            return None
    # Create the file (or bump mtime) with owner-write permission.
    try:
        path.touch(stat.S_IWUSR)
    except PermissionError:
        return None
    return path
def echo(word: str, n: int, toupper: bool = False) -> str:
    """
    Repeat a given word some number of times.

    :param word: word to repeat
    :type word: str
    :param n: number of repeats
    :type n: int
    :param toupper: return in all caps?
    :type toupper: bool
    :return: the repeated (optionally uppercased) string
    :return type: str
    """
    repeated = word * n
    return repeated.upper() if toupper else repeated
import math
def mul_pdf(mean1, var1, mean2, var2):
    """
    Multiply Gaussian (mean1, var1) with (mean2, var2) and return the
    results as a tuple (mean, var, scale_factor).

    Strictly speaking the product of two Gaussian PDFs is a Gaussian
    function, not Gaussian PDF. It is, however, proportional to a Gaussian
    PDF; `scale_factor` provides this proportionality constant.

    Parameters
    ----------
    mean1 : scalar
        mean of first Gaussian
    var1 : scalar
        variance of first Gaussian
    mean2 : scalar
        mean of second Gaussian
    var2 : scalar
        variance of second Gaussian

    Returns
    -------
    mean : scalar
        mean of product
    var : scalar
        variance of product
    scale_factor : scalar
        proportionality constant

    Examples
    --------
    >>> mul_pdf(1, 2, 3, 4)[:2]
    (1.6666666666666667, 1.3333333333333333)

    References
    ----------
    Bromily. "Products and Convolutions of Gaussian Probability Functions",
    Tina Memo No. 2003-003.
    http://www.tina-vision.net/docs/memos/2003-003.pdf
    """
    var_sum = var1 + var2
    mean = (var1 * mean2 + var2 * mean1) / var_sum
    var = 1. / (1. / var1 + 1. / var2)
    # Proportionality constant of the (unnormalised) Gaussian product.
    scale = math.exp(-(mean1 - mean2) ** 2 / (2 * var_sum))
    scale /= math.sqrt(2 * math.pi * var_sum)
    return mean, var, scale
def obfuscate_email(email):
"""Takes an email address and returns an obfuscated version of it.
For example: [email protected] would turn into t**t@e*********m
"""
if email is None:
return None
splitmail = email.split("@")
# If the prefix is 1 character, then we can't obfuscate it
if len(splitmail[0]) <= 1:
prefix = splitmail[0]
else:
prefix = f'{splitmail[0][0]}{"*"*(len(splitmail[0])-2)}{splitmail[0][-1]}'
# If the domain is missing or 1 character, then we can't obfuscate it
if len(splitmail) <= 1 or len(splitmail[1]) <= 1:
return f"{prefix}"
else:
domain = f'{splitmail[1][0]}{"*"*(len(splitmail[1])-2)}{splitmail[1][-1]}'
return f"{prefix}@{domain}" | 36c230ed75fc75fc7ecd6dd2ea71a6b3310c4108 | 708,417 |
def parse_boolean(arg: str):
    """Return True when *arg* is a case-insensitive prefix of 'true'.

    NOTE: any prefix matches — 't', 'tr', 'True' and even the empty
    string all yield True; everything else yields False.
    """
    normalized = str(arg).lower()
    return 'true'.startswith(normalized)
def ascending_coin(coin):
    """Returns the next ascending coin in order.

    >>> ascending_coin(1)
    5
    >>> ascending_coin(5)
    10
    >>> ascending_coin(10)
    25
    >>> ascending_coin(2)  # Other values return None
    """
    next_denomination = {1: 5, 5: 10, 10: 25}
    return next_denomination.get(coin)
def get_key(rule_tracker, value):
    """
    Given an event index, return its corresponding key from the dictionary.

    Parameters:
        rule_tracker (dict): Key-value pairs specific to a rule where key
            is an activity and value is an event index
        value (int): Index of event in event log

    Returns:
        The first key mapping to *value*, or None when absent.
    """
    for key, stored in rule_tracker.items():
        if stored == value:
            return key
def check_values_on_diagonal(matrix):
    """
    Check if a matrix stored as a dictionary of dictionaries has values
    on its diagonal, i.e. every outer key also appears in its own row.

    :param matrix: dictionary of dictionaries
    :return: boolean
    """
    return all(row_key in matrix[row_key] for row_key in matrix)
import csv
def build_gun_dictionary(filename):
    """Build a dictionary of gun parameters from an external CSV file:

    - Key: the gun designation (e.g. '13.5 in V' or '12 in XI')
    - Value: a list of float parameters, in the order:
      * caliber (in inches)
      * maxrange (maximum range in yards)
      * longtohit (chance to hit per gun and minute at long range)
      * longmin (minimum range considered to be long)
      * effectivetohit (chance to hit per gun and minute at effective range)
      * effectivemin (minimum range considered to be effective)
      * shorttohit (chance to hit per gun and minute at short range)
    """
    gundict = {}
    with open(filename) as sourcefile:
        rows = csv.reader(sourcefile, delimiter=",")
        next(rows)  # skip the header row
        for row in rows:
            designation, *params = row
            gundict[designation] = [float(p) for p in params]
    return gundict
def _can_be_quoted(loan_amount, lent_amounts):
"""
Checks if the borrower can obtain a quote. To this aim, the loan amount should be less than or
equal to the total amounts given by lenders.
:param loan_amount: the requested loan amount
:param lent_amounts: the sum of the amounts given by lenders
:return: True if the borrower can get a quote, False otherwise
"""
return sum(lent_amounts) - loan_amount >= 0; | 6fd717f3d0e844752e07e9dd435ff72eaa4b34c9 | 708,429 |
def to_dict(eds, properties=True, lnk=True):
    """
    Encode the EDS as a dictionary suitable for JSON serialization.
    """
    encoded_nodes = {}
    for node in eds.nodes:
        entry = {'label': node.predicate, 'edges': node.edges}
        if lnk and node.lnk is not None:
            entry['lnk'] = {'from': node.cfrom, 'to': node.cto}
        if node.type is not None:
            entry['type'] = node.type
        # Only emit a non-empty properties mapping, and only when asked.
        if properties and node.properties:
            entry['properties'] = node.properties
        if node.carg is not None:
            entry['carg'] = node.carg
        encoded_nodes[node.id] = entry
    return {'top': eds.top, 'nodes': encoded_nodes}
def SieveOfEratosthenes(limit=10**6):
    """Returns all primes not greater than limit."""
    # bytearray is a compact 0/1 sieve; nonzero marks a composite.
    is_composite = bytearray(limit + 1)
    primes = []
    for candidate in range(2, limit + 1):
        if is_composite[candidate]:
            continue
        primes.append(candidate)
        # Start marking at candidate**2: smaller multiples are already marked.
        for multiple in range(candidate * candidate, limit + 1, candidate):
            is_composite[multiple] = 1
    return primes
import torch
def iou_overlaps(b1, b2):
    """
    Compute pair-wise IoU and generalized IoU between aligned box sets.

    Arguments:
        b1: dts, [n, >=4] (x1, y1, x2, y2, ...)
        b2: gts, [n, >=4] (x1, y1, x2, y2, ...)
    Returns:
        intersection-over-union pair-wise, generalized iou.
    """
    # Box areas under the inclusive (+1 pixel) convention.
    area1 = (b1[:, 2] - b1[:, 0] + 1) * (b1[:, 3] - b1[:, 1] + 1)
    area2 = (b2[:, 2] - b2[:, 0] + 1) * (b2[:, 3] - b2[:, 1] + 1)
    # Intersection corners, and enclosing-box corners (needed for GIoU).
    inter_tl = torch.max(b1[:, :2], b2[:, :2])
    inter_br = torch.min(b1[:, 2:4], b2[:, 2:4])
    enclose_tl = torch.min(b1[:, :2], b2[:, :2])
    enclose_br = torch.max(b1[:, 2:4], b2[:, 2:4])
    inter_wh = (inter_br - inter_tl + 1).clamp(min=0)
    enclose_wh = (enclose_br - enclose_tl + 1).clamp(min=0)
    inter_area = inter_wh[:, 0] * inter_wh[:, 1]
    union_area = area1 + area2 - inter_area
    iou = inter_area / torch.clamp(union_area, min=1)
    enclose_area = enclose_wh[:, 0] * enclose_wh[:, 1] + 1e-7
    giou = iou - (enclose_area - union_area) / enclose_area
    return iou, giou
def updating_node_validation_error(address=False, port=False, id=False,
                                   weight=False):
    """
    Build the CLB "node update" validation-failure response.

    Verified 2015-06-16:
    - when trying to update a CLB node's address/port/id, which are
      immutable.
    - when trying to update a CLB node's weight to be < 1 or > 100

    At least one of address, port, id, and weight should be `True` for this
    error to apply.

    :param bool address: Whether the address was passed to update
    :param bool port: Whether the port was passed to update
    :param bool id: Whether the ID was passed to update
    :param bool weight: Whether the weight was passed to update and wrong
    :return: a `tuple` of (dict body message, 400 http status code)
    """
    messages = []
    if address:
        messages.append("Node ip field cannot be modified.")
    if port:
        messages.append("Port field cannot be modified.")
    if weight:
        messages.append("Node weight is invalid. Range is 1-100. "
                        "Please specify a valid weight.")
    if id:
        messages.append("Node id field cannot be modified.")
    body = {
        "validationErrors": {
            "messages": messages
        },
        "message": "Validation Failure",
        "code": 400,
        "details": "The object is not valid",
    }
    return (body, 400)
def parse_numbers(numbers):
    """Return the given iterable of number strings as a list of ints."""
    return list(map(int, numbers))
def _non_blank_line_count(string):
"""
Parameters
----------
string : str or unicode
String (potentially multi-line) to search in.
Returns
-------
int
Number of non-blank lines in string.
"""
non_blank_counter = 0
for line in string.splitlines():
if line.strip():
non_blank_counter += 1
return non_blank_counter | dfa6f43af95c898b1f4763573e8bf32ddf659520 | 708,450 |
def encode_direct(list_a: list):
    """Problem 13: Run-length encoding of a list (direct solution).

    Runs of equal consecutive elements are encoded as ``[count, element]``;
    singletons are kept as the bare element.

    Parameters
    ----------
    list_a : list
        The input list

    Returns
    -------
    list of list
        A length-encoded list

    Raises
    ------
    TypeError
        If the given argument is not of `list` type
    """
    if not isinstance(list_a, list):
        raise TypeError('The argument given is not of `list` type.')
    if len(list_a) <= 1:
        # Empty and single-element lists encode as themselves.
        return list_a

    def emit(count, element):
        return element if count == 1 else [count, element]

    encoded = []
    run_value, run_length = list_a[0], 1
    for element in list_a[1:]:
        if element == run_value:
            run_length += 1
        else:
            # Run ended: flush it and start a new one.
            encoded.append(emit(run_length, run_value))
            run_value, run_length = element, 1
    encoded.append(emit(run_length, run_value))
    return encoded
import math
def truncate(f, n):
    """
    Floor the float *f* to *n* digits after the decimal point.
    """
    scale = 10 ** n
    return math.floor(f * scale) / scale
def get_core_blockdata(core_index, spltcore_index, core_bases):
    """
    Get Core Offset and Length.

    :param core_index: Index of the Core
    :param spltcore_index: Index of last core before split
    :param core_bases: Array with base offset, core length and, optionally
        at position 4, the base offset used after the split
    :return: Array with core offset and core length
    """
    base = int(core_bases[0])
    length = int(core_bases[1])
    split_base = int(core_bases[4]) if len(core_bases) > 4 else 0
    offset = base + core_index * length
    # Cores past the split point are laid out from the split base instead.
    if split_base and core_index + 2 > spltcore_index:
        offset = split_base + (core_index - spltcore_index + 1) * length
    return [offset, length]
from datetime import datetime
def timestamp(date):
    """Get the timestamp of the `date`, python2/3 compatible.

    :param datetime.datetime date: the utc date.
    :return: the timestamp of the date.
    :rtype: float
    """
    epoch = datetime(1970, 1, 1)
    return (date - epoch).total_seconds()
def getdate(targetconnection, ymdstr, default=None):
    """Convert a string of the form 'yyyy-MM-dd' to a Date object.

    The returned Date is in the given targetconnection's format.

    Arguments:
    - targetconnection: a ConnectionWrapper whose underlying module's
      Date format is used
    - ymdstr: the string to convert
    - default: The value to return if the conversion fails
    """
    try:
        year, month, day = ymdstr.split('-')
        module = targetconnection.getunderlyingmodule()
        return module.Date(int(year), int(month), int(day))
    except Exception:
        # Best-effort conversion: any failure yields the default.
        return default
from datetime import datetime
def datetime_to_epoch(date_time: datetime) -> int:
    """Convert a datetime object to an epoch integer (whole seconds)."""
    epoch_seconds = date_time.timestamp()
    return int(epoch_seconds)
from functools import reduce
def inet_aton(s):
    """Convert a dotted-quad IPv4 string to an int.

    :raises ValueError: if *s* is not a well-formed dotted quad.
    """
    try:
        octets = [int(part) for part in s.split('.')]
        packed = (octets[0] << 24) + (octets[1] << 16) + (octets[2] << 8) + octets[3]
    except (ValueError, IndexError):
        raise ValueError('illegal IP: {0}'.format(s))
    return packed
from typing import Optional
from typing import Any
def get_or_create_mpc_section(
    mp_controls: "MpConfigControls", section: str, subkey: Optional[str] = None  # type: ignore
) -> Any:
    """
    Return (and create if it doesn't exist) a settings section.

    Parameters
    ----------
    mp_controls : MpConfigControls
        The MP Config database.
    section : str
        The section name (top level settings item)
    subkey : Optional[str], optional
        Optional subkey to create, by default None

    Returns
    -------
    Any
        The settings at that section[subkey] location.
    """
    section_value = mp_controls.get_value(section)
    if section_value is None:
        mp_controls.set_value(section, {})
        section_value = mp_controls.get_value(section)
    if subkey and subkey not in section_value:
        mp_controls.set_value(f"{section}.{subkey}", {})
        return mp_controls.get_value(f"{section}.{subkey}")
    # NOTE: when the subkey already exists, the WHOLE section is returned
    # (preserved quirk of the original implementation).
    return mp_controls.get_value(section)
def _url_as_filename(url: str) -> str:
"""Return a version of the url optimized for local development.
If the url is a `file://` url, it will return the remaining part
of the url so it can be used as a local file path. For example,
'file:///logs/example.txt' will be converted to
'/logs/example.txt'.
Parameters
----------
url: str The url to check and optaimize.
Returns
-------
str: The url converted to a filename.
"""
return url.replace('file://', '') | d1aef7a08221c7788f8a7f77351ccb6e6af9416b | 708,474 |
def CheckStructuralModelsValid(rootGroup, xyzGridSize=None, verbose=False):
    """
    **CheckStructuralModelsValid** - Checks for valid structural model group data
    given a netCDF root node

    Parameters
    ----------
    rootGroup: netCDF4.Group
        The root group node of a Loop Project File
    xyzGridSize: [int,int,int] or None
        The 3D grid shape to test data in this node to adhere to
    verbose: bool
        A flag to indicate a higher level of console logging (more if True)

    Returns
    -------
    bool
        True if valid structural model data in project file, False otherwise.
    """
    valid = True
    # A missing "StructuralModels" group is not an error: the file simply
    # contains no structural models, so it is vacuously valid.
    if "StructuralModels" in rootGroup.groups:
        if verbose: print(" Structural Models Group Present")
        smGroup = rootGroup.groups.get("StructuralModels")
        # if verbose: print(smGroup)
        # easting/northing/depth are expected both as attributes and as
        # dimensions; the dimension sizes give the model grid shape.
        if "easting" in smGroup.ncattrs() and "northing" in smGroup.ncattrs() and "depth" in smGroup.ncattrs():
            if xyzGridSize != None:
                # Check gridSize from extents matches models sizes
                smGridSize = [smGroup.dimensions["easting"].size,smGroup.dimensions["northing"].size,smGroup.dimensions["depth"].size]
                if smGridSize != xyzGridSize:
                    print("(INVALID) Extents grid size and Structural Models Grid Size do NOT match")
                    print("(INVALID) Extents Grid Size : ", xyzGridSize)
                    print("(INVALID) Structural Models Grid Size : ", smGridSize)
                    valid = False
                else:
                    if verbose: print(" Structural Models grid size adheres to extents")
        else:
            if verbose: print("No structural models extents in project file")
    else:
        if verbose: print("No Structural Models Group Present")
    return valid
def link_name_to_index(model):
    """Generate a dictionary mapping each link name in the model to its index."""
    return {link.name: position for position, link in enumerate(model.links)}
import requests
def get_session(token, custom_session=None):
    """Get a requests session with authorization headers.

    Args:
        token (str): Top secret GitHub access token
        custom_session: e.g. betamax's session

    Returns:
        :class:`requests.sessions.Session`: Session
    """
    session = custom_session or requests.Session()
    auth_headers = {
        "Authorization": "token " + token,
        "User-Agent": "testapp"
    }
    session.headers = auth_headers
    return session
import json
def json_formatter(result, _verbose):
    """Format result as json.

    A list whose first element contains a "data" key is rendered as one
    compact JSON document per record, newline-separated; anything else is
    pretty-printed with sorted keys.
    """
    if isinstance(result, list) and "data" in result[0]:
        records = result[0]["data"]
        return "\n".join(json.dumps(record) for record in records)
    return json.dumps(result, indent=4, sort_keys=True)
def pipe_hoop_stress(P, D, t):
    """Calculate the hoop (circumferential) stress in a pipe
    using Barlow's formula.

    Refs: https://en.wikipedia.org/wiki/Barlow%27s_formula
          https://en.wikipedia.org/wiki/Cylinder_stress

    :param P: the internal pressure in the pipe.
    :type P: float
    :param D: the outer diameter of the pipe.
    :type D: float
    :param t: the pipe wall thickness.
    :type t: float
    :returns: the hoop stress in the pipe.
    :rtype: float
    """
    # Barlow's formula: sigma = P * D / (2 * t)
    return P * D / 2 / t
def PositionToPercentile(position, field_size):
    """Converts from position in the field to percentile.

    position: int
    field_size: int
    """
    # Number of entrants beaten or tied (1st place beats the whole field).
    runners_beaten = field_size - position + 1
    return 100.0 * runners_beaten / field_size
def chessboard_distance(x_a, y_a, x_b, y_b):
    """
    Compute the chessboard (Chebyshev) distance between
    point (x_a, y_a) and (x_b, y_b): the larger coordinate difference.
    """
    dx = abs(x_b - x_a)
    dy = abs(y_b - y_a)
    return dx if dx > dy else dy
def str_with_tab(indent: int, text: str, uppercase: bool = True) -> str:
    """Create a string with ``indent`` spaces followed by ``text``."""
    body = text.upper() if uppercase else text
    return " " * indent + body
def mvg_logpdf_fixedcov(x, mean, inv_cov):
    """
    Log-pdf of the multivariate Gaussian distribution where the determinant and
    inverse of the covariance matrix are precomputed and fixed.

    Note that this neglects the additive constant
    -0.5 * (len(x) * log(2 * pi) + log_det_cov), because it is irrelevant when
    comparing pdf values with a fixed covariance — so this is NOT the
    normalised pdf.

    Args:
        x (1D numpy array): Vector value at which to evaluate the pdf.
        mean (1D numpy array): Mean vector of the multivariate Gaussian distribution.
        inv_cov (2D numpy array): Inverted covariance matrix.

    Returns:
        float: Log-pdf value (up to the neglected constant).
    """
    deviation = x - mean
    quad_form = deviation @ inv_cov @ deviation
    return -0.5 * quad_form
def extrode_multiple_urls(urls):
    """ Return the last (right) url value """
    if not urls:
        # Empty string / None passes through unchanged.
        return urls
    return urls.rsplit(',', 1)[-1]
def take_last_while(predicate, list):
    """Returns a new list containing the last n elements of a given list, passing
    each value to the supplied predicate function, and terminating when the
    predicate function returns false. Excludes the element that caused the
    predicate function to fail. The predicate function is passed one argument:
    (value)"""
    for i, e in enumerate(reversed(list)):
        if not predicate(e):
            # `list[-0:]` is the whole list, so the i == 0 case (the very last
            # element already fails) must return an empty list explicitly.
            return list[-i:] if i else []
    # Every element satisfied the predicate; return a copy so callers always
    # receive a new list, as documented.
    return list[:]
def abs_p_diff(predict_table, categA='sandwich', categB='sushi'):
    """Calculates the absolute distance between two category predictions

    :param predict_table: as returned by `predict_table`
    :param categA: the first of two categories to compare
    :param categB: the second of two categories to compare
    :returns: series with the absolute difference between the predictions
    :rtype: pandas Series
    """
    col_a = predict_table['p_{}'.format(categA)]
    col_b = predict_table['p_{}'.format(categB)]
    return abs(col_a - col_b)
def validate_engine_mode(engine_mode):
    """
    Validate database EngineMode for DBCluster
    Property: DBCluster.EngineMode
    """
    valid_modes = (
        "provisioned",
        "serverless",
        "parallelquery",
        "global",
        "multimaster",
    )
    if engine_mode in valid_modes:
        return engine_mode
    raise ValueError(
        "DBCluster EngineMode must be one of: %s" % ", ".join(valid_modes)
    )
def num_false_positives(df):
    """Total number of false positives (false-alarms)."""
    # Count rows whose noraw Type equals 'FP'.
    return (df.noraw.Type == 'FP').sum()
def gcd_recursive_by_divrem(m, n):
    """
    Computes the greatest common divisor of two numbers by recursively getting remainder from
    division.

    :param int m: First number.
    :param int n: Second number.
    :returns: GCD as a number.
    """
    # Euclid's algorithm: gcd(m, n) == gcd(n, m mod n), with gcd(m, 0) == m.
    return m if n == 0 else gcd_recursive_by_divrem(n, m % n)
def make_list_table(headers, data, title='', columns=None):
    """Build a list-table directive.

    :param headers: List of header values.
    :param data: Iterable of row data, yielding lists or tuples with rows.
    :param title: Optional text to show as the table title.
    :param columns: Optional widths for the columns.
    """
    lines = ['.. list-table:: %s' % title, '   :header-rows: 1']
    if columns:
        lines.append('   :widths: %s' % (','.join(str(c) for c in columns)))
    lines.append('')
    # Header row: first cell opens the row item, the rest continue it.
    lines.append('   - * %s' % headers[0])
    lines.extend('     * %s' % h for h in headers[1:])
    # Data rows follow the same layout.
    for row in data:
        lines.append('   - * %s' % row[0])
        lines.extend('     * %s' % r for r in row[1:])
    lines.append('')
    return '\n'.join(lines)
def pre_process(dd, df, dataset_len, batch_size):
    """Partition one dataframe to multiple small dataframes based on a given batch size."""
    df = dd.str2ascii(df, dataset_len)
    partitioned_dfs = []
    # Slice off batch_size rows at a time; the final chunk may be smaller.
    for start in range(0, dataset_len, batch_size):
        partitioned_dfs.append(df.iloc[start:start + batch_size:1])
    return partitioned_dfs
def _make_unique(key, val):
"""
Make a tuple of key, value that is guaranteed hashable and should be unique per value
:param key: Key of tuple
:param val: Value of tuple
:return: Unique key tuple
"""
if type(val).__hash__ is None:
val = str(val)
return key, val | 65d746276f635c129aa0a5aeb9b9f467453c0b2a | 708,533 |
def headline(
        in_string,
        surround=False,
        width=72,
        nr_spaces=2,
        spacesym=' ',
        char='=',
        border=None,
        uppercase=True,
        ):
    """return in_string capitalized, spaced and sandwiched:

    ============================== T E S T ===============================

    Parameters are the following:

    * char (one-letter string, default='='):
      changes the character the title is put between.

    * surround (boolean, default=False):
      adds additional lines above and under in_string:

      ====================================================
      ==================== T E S T =====================
      ====================================================

    * width (int, default=72):
      defines the width of each line.

    * nr_spaces (int, default=2):
      defines number of nr_spaces between in_string and the
      char as indicated in ..====__T I T L E__====.. .

    * spacesym (one-letter string, default=' '):
      instead of using a whitespace to seperate the 'title' letters,
      one can use every other character, e.g. '_'.

    * border (either string or list/tuple of two strings; defaults to char):
      If this is a single character string, it will be used at the left
      and right end of the headline.
      If this is multiple character string, it will be used at the left
      and mirrored at the right. This way you can easily introduce additional
      space if you prefer and use, for example c style like inline comments
      with border="/*".
      If this is not enough for you, the left and right borders can be given
      seperately, like in border=("<!--", "-->")

    * uppercase (boolean, default=True):
      if True, headline will capitalize the letters given by in_string.
      if False, in_string will be used as it is given.
    """
    # Resolve the left/right border strings.
    if isinstance(border, (tuple, list)):
        left_border, right_border = border[0], border[1]
    else:
        if border is None:
            border = char
        left_border, right_border = border, border[::-1]
    nr_sym_spaces = len(left_border) + len(right_border)

    # Letter-space the title, uppercasing per character if requested.
    spaced_title = spacesym.join(
        c.upper() if uppercase else c for c in in_string
    )
    # Pad with spacesym so nr_spaces symbols sit on each side of the title.
    inner_width = 2 * (len(in_string) + nr_spaces) - 1
    sandwiched = f'{spaced_title:{spacesym}^{inner_width}}'
    # Centre within the fill characters, leaving room for the borders.
    fill_width = width - nr_sym_spaces
    centered = f'{sandwiched:{char}^{fill_width}}'
    headline_full = f'{left_border}{centered}{right_border}'

    if not surround:
        return headline_full
    rule = f'{left_border}{char * fill_width}{right_border}'
    return rule + '\n' + headline_full + '\n' + rule
def get_label_names(l_json):
    """
    Get names of all the labels in given json

    :param l_json: list of labels jsons
    :type l_json: list
    :returns: list of labels names
    :rtype: list
    """
    return [entry['name'] for entry in l_json]
def interpolate_peak(spectrum: list, peak: int) -> float:
    """ Uses quadratic interpolation of spectral peaks to get a better estimate of the peak.

    Fits a parabola through the peak bin and its two neighbours and returns the
    interpolated (fractional) bin location of the true peak.

    Args:
        - spectrum: the frequency bin to analyze.
        - peak: the location of the estimated peak in the spectrum list.

    Based off: https://ccrma.stanford.edu/~jos/sasp/Quadratic_Interpolation_Spectral_Peaks.html
    """
    prev_neighbour = spectrum[peak-1]
    next_neighbour = spectrum[peak+1]
    peak_value = spectrum[peak]
    # Parabola vertex offset p = 1/2 * (alpha - gamma) / (alpha - 2*beta + gamma),
    # with alpha = prev, beta = peak value, gamma = next; rewritten below with
    # both signs flipped. The 1/2 factor was previously missing, which doubled
    # the correction away from the bin centre.
    offset = 0.5 * (next_neighbour - prev_neighbour) / (
        2 * peak_value - prev_neighbour - next_neighbour)
    estimated_peak = offset + peak
    return abs(estimated_peak)
import hashlib
def calc_fingerprint(text):
    """Return a hex string that fingerprints `text`."""
    # SHA-1 digest of the raw bytes, rendered as lowercase hex.
    hasher = hashlib.sha1()
    hasher.update(text)
    return hasher.hexdigest()
def has_balanced_parens(exp: str) -> bool:
    """
    Checks if the parentheses in the given expression `exp` are balanced,
    that is, if each opening parenthesis is matched by a corresponding
    closing parenthesis.

    **Example:**

    ::

        >>> has_balanced_parens("(((a * b) + c)")
        False

    :param exp: The expression to check.
    :return: `True` if the parentheses are balanced, `False` otherwise.
    """
    # Track nesting depth; a stack of identical '(' tokens is equivalent
    # to a simple counter.
    depth = 0
    for ch in exp:
        if ch == '(':
            depth += 1
        elif ch == ')':
            depth -= 1
            if depth < 0:
                # A ')' with no matching '(' before it.
                return False
    return depth == 0
def _is_binary(c):
"""Ensures character is a binary digit."""
return c in '01' | b763a5a8ba591b100fea64a589dcb0aea9fbcf53 | 708,544 |
def strip_trailing_characters(unstripped_string, tail):
    """
    Strip the tail from a string.

    :param unstripped_string: The string to strip. Ex: "leading"
    :param tail: The trail to remove. Ex: "ing"
    :return: The stripped string. Ex: "lead"
    """
    tail_str = str(tail)
    if tail_str and unstripped_string.endswith(tail_str):
        # Slice by length difference: the original `[:len(tail)]` wrongly kept
        # the PREFIX ("leading" -> "lea"), and `[:-len(tail)]` would empty the
        # string for a zero-length tail.
        return unstripped_string[:len(unstripped_string) - len(tail_str)]
    else:
        return unstripped_string
from typing import Optional
def q_to_res(Q: float) -> Optional[float]:
    """
    :param Q: Q factor
    :return: res, or None if Q < 0.25
    """
    # res crosses zero at Q = 0.25; below that the result is undefined.
    res = 1 - 1.25 / (Q + 1)
    return None if res < 0.0 else res
def id_str_to_bytes(id_str: str) -> bytes:
"""Convert a 40 characters hash into a byte array.
The conversion results in 160 bits of information (20-bytes array). Notice
that this operation is reversible (using `id_bytes_to_str`).
Args:
id_str: Hash string containing 40 characters.
Returns:
bytes: The ID converted to bytes.
"""
return int(id_str, 16).to_bytes(20, byteorder='big') | cd6a702343f1267e17710305f9aed70613feacb3 | 708,555 |
import math
def sieve(n):
    """
    Returns a list with all prime numbers up to n.

    >>> sieve(50)
    [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47]
    >>> sieve(25)
    [2, 3, 5, 7, 11, 13, 17, 19, 23]
    >>> sieve(10)
    [2, 3, 5, 7]
    >>> sieve(9)
    [2, 3, 5, 7]
    >>> sieve(2)
    [2]
    >>> sieve(1)
    []
    """
    # is_prime[i] is True until i is marked as a multiple of a smaller prime.
    is_prime = [True] * (n + 1)
    # Marking multiples up to sqrt(n) suffices: any composite <= n has a
    # factor in that range.
    for candidate in range(2, int(math.sqrt(n)) + 1):
        if is_prime[candidate]:
            for multiple in range(candidate * candidate, n + 1, candidate):
                is_prime[multiple] = False
    return [p for p in range(2, n + 1) if is_prime[p]]
def is_trueish(expression: str) -> bool:
    """True if string and "True", "Yes", "On" (ignorecase), False otherwise"""
    normalized = str(expression).strip().lower()
    return normalized in ('true', 'yes', 'on')
import re
def contains_order_by(query):
    """Returns true of the query contains an 'order by' clause"""
    # Case-insensitive, multiline search for 'order' + whitespace + 'by'.
    pattern = re.compile(r'order\s+by\b', re.MULTILINE | re.IGNORECASE)
    return pattern.search(query) is not None
def mean(l):
    """
    Returns the mean value of the given list
    """
    return sum(l) / float(len(l))
from typing import OrderedDict
def merge_nodes(nodes):
    """
    Merge nodes to deduplicate same-name nodes and add a "parents"
    attribute to each node, which is a list of Node objects.
    """
    def record_parent(unique_node, parent):
        # Lazily create the parents mapping, then add the parent once.
        existing = getattr(unique_node, 'parents', None)
        if existing:
            existing.setdefault(parent.name, parent)
        else:
            unique_node.parents = {parent.name: parent}

    by_name = OrderedDict()
    for node in nodes:
        # First occurrence of a name becomes the canonical node; every
        # occurrence contributes its parent.
        by_name.setdefault(node.name, node)
        record_parent(by_name[node.name], node.parent)
    return by_name.values()
def find_records(dataset, search_string):
    """Retrieve records filtered on search string.

    Parameters:
        dataset (list): dataset to be searched
        search_string (str): query string

    Returns:
        list: filtered list of records
    """
    needle = search_string.lower()  # case insensitive match
    return [record for record in dataset if needle in record.lower()]
def filter_spans(spans):
    """Filter a sequence of spans and remove duplicates or overlaps. Useful for
    creating named entities (where one token can only be part of one entity) or
    when merging spans with `Retokenizer.merge`. When spans overlap, the (first)
    longest span is preferred over shorter spans.

    spans (iterable): The spans to filter.
    RETURNS (list): The filtered spans.
    """
    def priority(span):
        # Longer spans win; among equal lengths, the earlier span wins.
        return (span.end - span.start, -span.start)

    kept = []
    claimed_tokens = set()
    for span in sorted(spans, key=priority, reverse=True):
        # Check end - 1 here because boundaries are inclusive
        if span.start not in claimed_tokens and (span.end - 1) not in claimed_tokens:
            kept.append(span)
            claimed_tokens.update(range(span.start, span.end))
    kept.sort(key=lambda span: span.start)
    return kept
from typing import Optional
from typing import List
def to_lines(text: str, k: int) -> Optional[List[str]]:
    """
    Given a block of text and a maximum line length k, split the text into lines of length at most k.
    If this cannot be done, i.e. a word is longer than k, return None.

    :param text: the block of text to process
    :param k: the maximum length of each line
    :return: the list of lines

    >>> text = 'the quick brown fox jumps over the lazy dog'
    >>> to_lines(text, 4) is None
    True
    >>> to_lines(text, 5)
    ['the', 'quick', 'brown', 'fox', 'jumps', 'over', 'the', 'lazy', 'dog']
    >>> to_lines(text, 9)
    ['the quick', 'brown fox', 'jumps', 'over the', 'lazy dog']
    >>> to_lines(text, 10)
    ['the quick', 'brown fox', 'jumps over', 'the lazy', 'dog']
    >>> to_lines(text, 12)
    ['the quick', 'brown fox', 'jumps over', 'the lazy dog']
    >>> to_lines('AAAAA', 5)
    ['AAAAA']
    >>> to_lines('   ', 5)
    []
    """
    # If there is no text or the line length is 0, we can't do anything.
    if not text or not k:
        return None
    words = text.split()
    # Whitespace-only text yields no words; the previous implementation
    # crashed here on max() of an empty sequence. Nothing to lay out.
    if not words:
        return []
    # If any word is longer than k, we can't do anything.
    if max(len(word) for word in words) > k:
        return None
    # Greedily pack words into lines.
    lines: List[str] = []
    line: List[str] = []
    len_so_far = 0
    for word in words:
        len_word = len(word)
        if len_word + len_so_far <= k:
            # Count the word plus a trailing separator space; the last word on
            # a line has no separator, hence the condition is <= k, not < k.
            line.append(word)
            len_so_far += len_word + 1
        else:
            # Flush the current line and start a new one with this word.
            lines.append(' '.join(line))
            line = [word]
            len_so_far = len_word + 1
    # Last case: if we have a partial line, add it.
    if line:
        lines.append(' '.join(line))
    return lines
def product_except_self(nums: list[int]) -> list[int]:
    """Computes the product of all the elements of given array at each index excluding the value at that index.

    Note: could also take math.prod(nums) and divide out the num at each index,
    but corner cases of num_zeros > 1 and num_zeros == 1 make code inelegant.

    Examples:
    >>> product_except_self([])
    []
    >>> product_except_self([1,2,3,4])
    [24, 12, 8, 6]
    >>> product_except_self([-1,1,0,-3,3])
    [0, 0, 9, 0, 0]
    """
    n = len(nums)
    result = [1] * n
    # Prefix pass: after this loop, result[i] == product of nums[0..i-1].
    running = 1
    for i in range(n):
        result[i] = running
        running *= nums[i]
    # Suffix pass: fold in the product of nums[i+1..n-1].
    running = 1
    for i in range(n - 1, -1, -1):
        result[i] *= running
        running *= nums[i]
    return result
def get_option(args, config, key, default=None):
    """Gets key option from args if it is provided, otherwise tries to get it from config"""
    # A missing attribute and an explicit None are treated the same:
    # fall through to the config lookup.
    value = getattr(args, key, None)
    if value is not None:
        return value
    return config.get(key, default)
import re
def parse_extension(uri):
    """ Parse the extension of URI. """
    # Take the last dot-prefixed word group, e.g. '.gz' from 'a.tar.gz'.
    matches = re.findall(r'(\.\w+)', uri)
    return matches[-1]
import math
def rotz(ang):
    """
    Calculate the transform for rotation around the Z-axis.

    Arguments:
        ang: Rotation angle in degrees.

    Returns:
        A 4x4 homogeneous rotation matrix about the Z axis, as a nested
        list of floats (one inner list per row).
    """
    theta = math.radians(ang)
    cos_t = math.cos(theta)
    sin_t = math.sin(theta)
    return [
        [cos_t, -sin_t, 0.0, 0.0],
        [sin_t, cos_t, 0.0, 0.0],
        [0.0, 0.0, 1.0, 0.0],
        [0.0, 0.0, 0.0, 1.0],
    ]
def getBits(data, offset, bits=1):
"""
Get specified bits from integer
>>> bin(getBits(0b0011100,2))
'0b1'
>>> bin(getBits(0b0011100,0,4))
'0b1100'
"""
mask = ((1 << bits) - 1) << offset
return (data & mask) >> offset | 0bdae35f5afa076d0e5a73b91d2743d9cf156f7d | 708,583 |
def replace_ext(filename, oldext, newext):
"""Safely replaces a file extension new a new one"""
if filename.endswith(oldext):
return filename[:-len(oldext)] + newext
else:
raise Exception("file '%s' does not have extension '%s'" %
(filename, oldext)) | 33ab99860cfe90b72388635d5d958abe431fa45e | 708,587 |
import torch
def normalized_grid_coords(height, width, aspect=True, device="cuda"):
    """Return the normalized [-1, 1] grid coordinates given height and width.

    Args:
        height (int) : height of the grid.
        width (int) : width of the grid.
        aspect (bool) : if True, use the aspect ratio to scale the coordinates, in which case the
                        coords will not be normalzied to [-1, 1]. (Default: True)
        device : the device the tensors will be created on.
    """
    x_scale = width / height if aspect else 1.0
    # x runs left-to-right, y runs top-to-bottom (1 down to -1).
    xs = torch.linspace(-1, 1, steps=width, device=device) * x_scale
    ys = torch.linspace(1, -1, steps=height, device=device)
    grid = torch.stack(torch.meshgrid(xs, ys, indexing='ij'))
    # (2, width, height) -> (height, width, 2) so coord[i, j] == (x_j, y_i).
    return grid.permute(2, 1, 0)
Subsets and Splits
No saved queries yet
Save your SQL queries to embed, download, and access them later. Queries will appear here once saved.