content (string, 39 to 14.9k chars) | sha1 (string, 40 chars) | id (int64, 0 to 710k) |
---|---|---|
def A_real_deph(Q_deph, Kt_real_deph, deltaT_diff_deph):
"""
    Calculates the real heat transfer area.
    Parameters
    ----------
    Q_deph : float
        The heat load of the dephlegmator, [W], [J/s]
    deltaT_diff_deph : float
        The mean temperature difference, [degrees Celsius]
    Kt_real_deph : float
        The heat transfer coefficient, [W/(m**2 * degrees Celsius)]
    Returns
    -------
    A_real_deph : float
        The real heat transfer area, [m**2]
References
----------
Романков, формула 4.72, стр.168
"""
return Q_deph / (Kt_real_deph * deltaT_diff_deph) | 5c70a6e179922f90fbb4fda859d6911eb1f048e6 | 707,763 |
import random
import bisect
def generate_sector(size: int, object_weight: list) -> dict:
"""
    Generates a sector with weighted spawns.
    Args:
        size: int representing the size of the sector (size x size)
        object_weight: a nested list of [object, weight] pairs
    Examples:
        generate_sector(6, [["*", 50], ["#", 10]]) would output a map dict where * is far more common than #
    Returns:
        A dict of lists, each list representing the map data for one row
    """
    if size == 0:
        raise ValueError("The sector size can't be 0")
size += 1
output = {}
placed_player = False
totals = []
running_total = 0
for w in object_weight:
running_total += w[1]
totals.append(running_total)
    def next_object():
        """
        Gets a random object from the object/weight list.
        """
        ran = random.random() * totals[-1]
        i = bisect.bisect_right(totals, ran)
        return object_weight[i][0]
    for x in range(1, size):
        row = []
        for y in range(1, size):
            obj = next_object()
            if not placed_player and obj == "@":
                row.append(obj)
                placed_player = True
                continue
            elif placed_player and obj == "@":
                # Only one player marker is allowed; re-roll until something else comes up.
                while obj == "@":
                    obj = next_object()
            row.append(obj)
output[x] = row
return output | 514195b66c707b2e0dd67ea47b57fe56c1d28a86 | 707,764 |
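A minimal usage sketch (my own example, not part of the dataset entry); the map contents are random, so only the shape of the output is meaningful:

random.seed(0)  # make the sketch reproducible
sector = generate_sector(3, [["*", 50], ["#", 10], ["@", 1]])
for row_number, row in sector.items():
    print(row_number, "".join(row))  # three rows of three symbols each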
def compare_AlphaFz(sq_amp,sq_amp_baseline):
"""
Compare the baseline alpha squared amplitude with that of a single epoch.
Parameters
----------
sq_amp: float
Alpha squared amplitude (Fz) from a single epoch
    sq_amp_baseline: float
Baseline alpha squared amplitude (Fz)
Returns
-------
feedback_val: float
Feedback value for stimulus presentation [-1,1]
"""
relative_error = (sq_amp-sq_amp_baseline)/sq_amp_baseline
feedback_val = relative_error
if feedback_val>1:
feedback_val = 1
elif feedback_val<-1:
feedback_val = -1
return feedback_val | 290560dc815393799d61f51a7684b4bde309dbac | 707,766 |
def _unpad(string: str) -> str:
"""Un-pad string."""
return string[: -ord(string[len(string) - 1 :])] | dbd036afabc29047201a9ed2d6b299bb5fe3ba0f | 707,767 |
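A short illustration (my own, assuming PKCS#7-style padding where the final character encodes the pad length) of what `_unpad` removes:

padded = "HELLO" + chr(3) * 3  # hypothetical 8-char block padded with three pad chars
assert _unpad(padded) == "HELLO"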
from pathlib import Path
import shutil
def copy_dir_to_target(source_directory: Path, destination_directory: Path) -> bool:
"""
Args:
source_directory: a folder to copy
destination_directory: the parent directory to copy source_directory into
Returns: True if copy was successful, False otherwise
"""
if source_directory.exists() and source_directory.is_dir():
print("Found directory at %s" % source_directory.resolve())
else:
print("Unable to find required folder, looked at %s" % source_directory.resolve())
return False
print("Copying to %s" % destination_directory)
shutil.copytree(str(source_directory), str(destination_directory / source_directory.name))
return True | 2dd67db56c17c787ea69189c52db11edcfcb0d3c | 707,768 |
from typing import Optional
def get_next_url(bundle: dict) -> Optional[str]:
"""
Returns the URL for the next page of a paginated ``bundle``.
>>> bundle = {
... 'link': [
... {'relation': 'self', 'url': 'https://example.com/page/2'},
... {'relation': 'next', 'url': 'https://example.com/page/3'},
... {'relation': 'previous', 'url': 'https://example.com/page/1'},
... ]
... }
>>> get_next_url(bundle)
'https://example.com/page/3'
>>> bundle = {
... 'link': [
... {'relation': 'self', 'url': 'https://example.com/page/1'},
... ]
... }
>>> type(get_next_url(bundle))
<class 'NoneType'>
"""
if 'link' in bundle:
for link in bundle['link']:
if link['relation'] == 'next':
return link['url'] | 0fafa4dc56fb5e03838652419e94dceb8aed9e75 | 707,769 |
def spltime(tseconds):
""" This gets the time in hours, mins and seconds """
hours = tseconds // 3600
minutes = int(tseconds / 60) % 60
seconds = tseconds % 60
return hours, minutes, seconds | a8ba14879da51ebbeac2ba201fc562a22fe13364 | 707,771 |
def draw_text(text, bgcolor, plt_ax, text_plt):
"""
Render the text
:param str text: text to render
:param str bgcolor: backgroundcolor used to render text
:param matplotlib.axes.Axes plt_ax: figure sub plot instance
:param matplotlib.text.Text text_plt: plot of text
:return matplotlib.text.Text: updated plot of text
"""
if text_plt is None:
# render text with color
text_plt = plt_ax.text(0.95, 0.95, text, backgroundcolor=bgcolor,
horizontalalignment='right', verticalalignment='top',
transform=plt_ax.transAxes, fontsize=10)
else:
# update existing text
text_plt.set_text(text)
return text_plt | 478ada3b4fbb3add935713268415cd4606ef58b3 | 707,772 |
import uuid
def generate_uuid() -> str:
"""
Generate UUIDs to use as `sim.base_models.Node` and `sim.base_models.Item` ids.
"""
return str(uuid.uuid4()) | 9428676bb633873a2f32c53172146486f1421234 | 707,774 |
def merge_dicts(a, b):
"""combine two dictionaries, assuming components are arrays"""
result = a
for k, v in b.items():
if k not in result:
result[k] = []
result[k].extend(v)
return result | de465179faf1bd9ace312fa4b21d332ac994b72b | 707,775 |
import re
def parse_signature(signature):
"""
Parses one signature
:param signature: stanc3 function signature
    :return: return type, function name and list of function argument types
"""
return_type, rest = signature.split(" ", 1)
function_name, rest = rest.split("(", 1)
args = re.findall(r"(?:[(][^()]+[)][^,()]+)|(?:[^,()]+(?:,*[]])?)", rest)
args = [i.strip() for i in args if i.strip()]
return return_type, function_name, args | 11da2fb6008274f8d9a959651a181f127c85a34e | 707,778 |
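A rough illustration (the signature string below is hypothetical, written only to exercise the parser, not quoted from stanc3):

ret, name, args = parse_signature("real log_mix(real, real, real)")
print(ret, name, args)  # 'real', 'log_mix', ['real', 'real', 'real']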
def _GetKeyKind(key):
"""Return the kind of the given key."""
return key.path().element_list()[-1].type() | c37f1d889e484390449de682e3d6c6b9d4521ce4 | 707,781 |
import yaml
def load_config() -> dict:
"""
Loads the config.yml file to memory and returns it as dictionary.
:return: Dictionary containing the config.
"""
with open('config.yml', 'r') as ymlfile:
return yaml.load(ymlfile, Loader=yaml.FullLoader) | 6e05aa4eb6a7d9862814f595ecdc89ffab145ee5 | 707,782 |
def get_rb_data_attribute(xmldict, attr):
"""Get Attribute `attr` from dict `xmldict`
Parameters
----------
xmldict : dict
Blob Description Dictionary
attr : str
Attribute key
Returns
-------
sattr : int
Attribute Values
"""
try:
sattr = int(xmldict["@" + attr])
except KeyError:
raise KeyError(
f"Attribute @{attr} is missing from "
"Blob Description. There may be some "
"problems with your file"
)
return sattr | dfc48ad47f67b2303874154ce4a164a176c1f4bf | 707,786 |
def tls_params(mqtt_config):
"""Return the TLS configuration parameters from a :class:`.MQTTConfig`
object.
Args:
mqtt_config (:class:`.MQTTConfig`): The MQTT connection settings.
Returns:
dict: A dict {'ca_certs': ca_certs, 'certfile': certfile,
'keyfile': keyfile} with the TLS configuration parameters, or None if
no TLS connection is used.
.. versionadded:: 0.6.0
"""
# Set up a dict containing TLS configuration parameters for the MQTT
# client.
if mqtt_config.tls.hostname:
return {'ca_certs': mqtt_config.tls.ca_file,
'certfile': mqtt_config.tls.client_cert,
'keyfile': mqtt_config.tls.client_key}
# Or don't use TLS.
else:
return None | 4b5d214a50fea60f5cb325fc7a0c93dfa9cb3c02 | 707,787 |
def generate_parallelogrammatic_board(width=5, height=5):
"""
Creates a board with a shape of a parallelogram.
Width and height specify the size (in fields) of the board.
"""
return [[1] * height for _ in range(width)] | 1c9bd6e6e26f6693b434d44e6dbe4085ba9236b8 | 707,791 |
from typing import get_origin
from typing import Tuple
def is_tuple(typ) -> bool:
"""
Test if the type is `typing.Tuple`.
"""
try:
return issubclass(get_origin(typ), tuple)
except TypeError:
return typ in (Tuple, tuple) | c8c75f4b1523971b20bbe8c716ced53199150b95 | 707,794 |
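A quick sanity check (assumes Python 3.8+, where `typing.get_origin` is available):

print(is_tuple(Tuple[int, str]))  # True
print(is_tuple(tuple))            # True, via the TypeError fallback
print(is_tuple(list))             # False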
import functools
import unittest
def _skip_if(cond, reason):
"""Skip test if cond(self) is True"""
def decorator(impl):
@functools.wraps(impl)
def wrapper(self, *args, **kwargs):
if cond(self):
raise unittest.SkipTest(reason)
else:
impl(self, *args, **kwargs)
return wrapper
return decorator | 4141cc1f99c84633bdf2e92941d9abf2010c11f6 | 707,795 |
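A hypothetical usage sketch (the test class and `has_gpu` flag are invented for illustration):

class ExampleTests(unittest.TestCase):
    has_gpu = False

    @_skip_if(lambda self: not self.has_gpu, "requires a GPU")
    def test_gpu_kernel(self):
        self.assertTrue(True)  # skipped on machines where has_gpu is False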
import ctypes
def destructor(cfunc):
"""
Make a C function a destructor.
Destructors accept pointers to void pointers as argument. They are also wrapped as a staticmethod for usage in
classes.
:param cfunc: The C function as imported by ctypes.
:return: The configured destructor.
"""
cfunc.argtypes = [ctypes.POINTER(ctypes.c_void_p)]
cfunc.restype = None
return staticmethod(cfunc) | 05abd181649a2178d4dce704ef93f61eb5418092 | 707,796 |
def update_file_info_in_job(job, file_infos):
"""
Update the 'setup.package.fileInformations' data in the JSON to append new file information.
"""
for file_info in file_infos:
try:
job['setup']['package']['fileInformations'].append(file_info)
except (KeyError, TypeError, AttributeError):
# If we get here, 'setup.package.fileInformations' does not exist yet.
print('Job file input is missing required setup.package.fileInformations data.')
exit(1)
return job | 9902173548d72fcd35c8f80bb44b59aac27d9401 | 707,797 |
import math
def distance(x1: float, y1: float, x2: float, y2: float) -> float:
"""
Finds distance between two given points
Parameters:
x1, y1 : The x and y coordinates of first point
x2, y2 : The x and y coordinates of second point
Returns:
    Distance rounded to two decimal places.
"""
distance = math.sqrt( ((x1-x2)**2)+((y1-y2)**2) )
return round(distance,2) | 63f103f46b52aae146b52f385e15bc3441f042e5 | 707,798 |
def _str_trim_left(x):
"""
Remove leading whitespace.
"""
    return x.str.replace(r"^\s*", "", regex=True) | 2718086073706411929b45edf80a1d464dfaeff6 | 707,799 |
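A minimal sketch of the intended input, a pandas string Series (pandas is assumed to be installed):

import pandas as pd

s = pd.Series(["  hello", "\tworld"])
print(_str_trim_left(s).tolist())  # ['hello', 'world']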
def print_formula(elements):
"""
The input dictionary, atoms and their amount, is processed to produce
the chemical formula as a string
Parameters
----------
elements : dict
The elements that form the metabolite and their corresponding amount
Returns
-------
formula : str
The formula of the metabolite
"""
formula = "".join([f"{k}{int(v)}" for k, v in elements.items()])
return formula | a3c404ef0d18c417e44aee21106917f4ee203065 | 707,801 |
import unicodedata
def is_chinese_char(cc):
"""
Check if the character is Chinese
args:
cc: char
output:
boolean
"""
return unicodedata.category(cc) == 'Lo' | d376e6097e628ac2f3a7934ba42ee2772177f857 | 707,802 |
def format_component_descriptor(name, version):
"""
Return a properly formatted component 'descriptor' in the format
<name>-<version>
"""
return '{0}-{1}'.format(name, version) | 2edb92f20179ae587614cc3c9ca8198c9a4c240e | 707,804 |
import re
def replace_empty_bracket(tokens):
"""
Remove empty bracket
:param tokens: List of tokens
:return: Fixed sequence
"""
merged = "".join(tokens)
find = re.search(r"\{\}", merged)
while find:
merged = re.sub(r"\{\}", "", merged)
find = re.search(r"\{\}", merged)
return list(merged) | fd2c9f2f1c2e199056e89dbdba65f92e4d5834eb | 707,808 |
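A quick illustration (my own example): nested empty braces collapse away entirely because the substitution is repeated until no "{}" remains:

print(replace_empty_bracket(list("a{}b{{}}c")))  # ['a', 'b', 'c']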
def extra_credit(grades,students,bonus):
"""
Returns a copy of grades with extra credit assigned
The dictionary returned adds a bonus to the grade of
every student whose netid is in the list students.
Parameter grades: The dictionary of student grades
Precondition: grades has netids as keys, ints as values.
    Parameter students: The list of students to give extra credit
    Precondition: students is a list of valid (string) netids
Parameter bonus: The extra credit bonus to award
Precondition: bonus is an int
"""
# DICTIONARY COMPREHENSION
#return { k:(grades[k]+bonus if k in students else grades[k]) for k in grades }
# ACCUMULATOR PATTERN
result = {}
for k in grades:
if k in students:
result[k] = grades[k]+bonus
else:
result[k] = grades[k]
return result | 334a9edb3d1d045832009e20c6cba7f24e5c181d | 707,809 |
def get_rectangle(origin, end):
"""Return all points of rectangle contained by origin and end."""
size_x = abs(origin[0]-end[0])+1
size_y = abs(origin[1]-end[1])+1
rectangle = []
for x in range(size_x):
for y in range(size_y):
rectangle.append((origin[0]+x, origin[1]+y))
return rectangle | 36badfd8aefaaeda806215b02ed6e92fce6509a3 | 707,810 |
def policy(Q):
"""Hard max over prescriptions
Params:
-------
* Q: dictionary of dictionaries
Nested dictionary representing a table
Returns:
-------
    * policy: dictionary of states to best actions
"""
pol = {}
for s in Q:
pol[s] = max(Q[s].items(), key=lambda x: x[1])[0]
return pol | e69f66fba94b025034e03428a5e93ba1b95918e8 | 707,811 |
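A minimal sketch with an invented two-state Q-table, showing the per-state argmax:

Q = {
    "s0": {"left": 0.1, "right": 0.9},
    "s1": {"left": 0.7, "right": 0.2},
}
print(policy(Q))  # {'s0': 'right', 's1': 'left'}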
def user_directory_path(instance, filename):
"""Sets path to user uploads to: MEDIA_ROOT/user_<id>/<filename>"""
return f"user_{instance.user.id}/{filename}" | 84be5fe74fa5059c023d746b2a0ff6e32c14c10d | 707,812 |
def pa11y_counts(results):
"""
Given a list of pa11y results, return three integers:
number of errors, number of warnings, and number of notices.
"""
num_error = 0
num_warning = 0
num_notice = 0
for result in results:
if result['type'] == 'error':
num_error += 1
elif result['type'] == 'warning':
num_warning += 1
elif result['type'] == 'notice':
num_notice += 1
return num_error, num_warning, num_notice | 346c1efe0cae5934e623a8643b0f23f85300181d | 707,813 |
import requests
def http_request(method, url, headers, data=None):
"""
Request util
:param method: GET or POST or PUT
:param url: url
:param headers: headers
:param data: optional data (needed for POST)
:return: response text
"""
response = requests.request(method, url, headers=headers, data=data)
if response.status_code not in [200, 201, 204]:
http_error_msg = u'%s HTTP request failed: %s for url: %s' % (response.status_code, response.text, url)
#print ("utils.http_request ", http_error_msg)
raise requests.exceptions.HTTPError(response.text)
return response.text | 6d0453be79b3ae0f7ed60b5a8759b9295365dd6c | 707,814 |
def parse_title(line):
"""if this is title, return Tuple[level, content],
@type line: str
@return: Optional[Tuple[level, content]]
"""
line = line.strip()
if not line.startswith('#'):
return None
sharp_count = 0
for c in line:
if c == '#':
sharp_count += 1
else:
break
if sharp_count == len(line):
return None
title = line[sharp_count:].strip()
return sharp_count, title | 7c170f417755c878d225b780b8475a379501c19f | 707,815 |
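Two quick checks (my own examples) of the heading parser:

print(parse_title("## Getting started"))  # (2, 'Getting started')
print(parse_title("plain text"))          # None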
import itertools
import functools
def next_count(start: int = 0, step: int = 1):
"""Return a callable returning descending ints.
>>> nxt = next_count(1)
>>> nxt()
1
>>> nxt()
2
"""
count = itertools.count(start, step)
return functools.partial(next, count) | 299d457b2b449607ab02877eb108c076cb6c3e16 | 707,819 |
import json
def make_img_id(label, name):
""" Creates the image ID for an image.
Args:
label: The image label.
name: The name of the image within the label.
Returns:
The image ID. """
return json.dumps([label, name]) | 4ddcbf9f29d8e50b0271c6ee6260036b8654b90f | 707,820 |
def scale(pix, pixMax, floatMin, floatMax):
""" scale takes in
pix, the CURRENT pixel column (or row)
pixMax, the total # of pixel columns
floatMin, the min floating-point value
floatMax, the max floating-point value
scale returns the floating-point value that
corresponds to pix
"""
return (pix / pixMax) * (floatMax - floatMin) + floatMin | 455d0233cbeeafd53c30baa4584dbdac8502ef94 | 707,822 |
def make_set(value):
"""
Takes a value and turns it into a set
!!!! This is important because set(string) will parse a string to
individual characters vs. adding the string as an element of
the set i.e.
x = 'setvalue'
set(x) = {'t', 'a', 'e', 'v', 'u', 's', 'l'}
make_set(x) = {'setvalue'}
or use set([x,]) by adding string as first item in list.
:param value:
:return:
"""
if isinstance(value, list):
value = set(value)
elif not isinstance(value, set):
value = set([value])
return value | c811729ea83dc1fbff7c76c8b596e26153aa68ee | 707,823 |
def dt642epoch(dt64):
"""
Convert numpy.datetime64 array to epoch time
(seconds since 1/1/1970 00:00:00)
Parameters
----------
dt64 : numpy.datetime64
Single or array of datetime64 object(s)
Returns
-------
time : float
Epoch time (seconds since 1/1/1970 00:00:00)
"""
return dt64.astype('datetime64[ns]').astype('float') / 1e9 | f7cdaf44312cb0564bf57393a5fde727bc24e566 | 707,824 |
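A minimal sketch (numpy is assumed to be installed):

import numpy as np

t = np.datetime64("1970-01-01T00:01:00")
print(dt642epoch(t))  # 60.0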
import math
def calc_val_resize_value(input_image_size=(224, 224),
resize_inv_factor=0.875):
"""
Calculate image resize value for validation subset.
Parameters:
----------
input_image_size : tuple of 2 int
Main script arguments.
resize_inv_factor : float
Resize inverted factor.
Returns:
-------
int
Resize value.
"""
if isinstance(input_image_size, int):
input_image_size = (input_image_size, input_image_size)
resize_value = int(math.ceil(float(input_image_size[0]) / resize_inv_factor))
return resize_value | 5a8bcb77d849e62ef5ecfad74f5a3470ab4cfe59 | 707,825 |
def PyException_GetCause(space, w_exc):
"""Return the cause (another exception instance set by raise ... from ...)
associated with the exception as a new reference, as accessible from Python
through __cause__. If there is no cause associated, this returns
NULL."""
w_cause = space.getattr(w_exc, space.wrap('__cause__'))
if space.is_none(w_cause):
return None
return w_cause | dce5c1df12af7074ce25387e493ccac1aaac27ec | 707,829 |
def splitclass(classofdevice):
"""
Splits the given class of device to return a 3-item tuple with the
major service class, major device class and minor device class values.
These values indicate the device's major services and the type of the
device (e.g. mobile phone, laptop, etc.). If you google for
"assigned numbers bluetooth baseband" you might find some documents
that discuss how to extract this information from the class of device.
Example:
>>> splitclass(1057036)
(129, 1, 3)
>>>
"""
if not isinstance(classofdevice, int):
try:
classofdevice = int(classofdevice)
except (TypeError, ValueError):
raise TypeError("Given device class '%s' cannot be split" % \
str(classofdevice))
data = classofdevice >> 2 # skip over the 2 "format" bits
service = data >> 11
major = (data >> 6) & 0x1F
minor = data & 0x3F
return (service, major, minor) | 37c19ab17293b4fd0c46cff24c30e349459f7bd0 | 707,830 |
def get_positive(data_frame, column_name):
"""
Query given data frame for positive values, including zero
:param data_frame: Pandas data frame to query
:param column_name: column name to filter values by
:return: DataFrame view
"""
return data_frame.query(f'{column_name} >= 0') | 2aec7f611a1b181132f55f2f3ca73bf5025f2474 | 707,831 |
def pwr_y(x, a, b, e):
"""
Calculate the Power Law relation with a deviation term.
Parameters
----------
x : numeric
Input to Power Law relation.
a : numeric
Constant.
b : numeric
Exponent.
e : numeric
Deviation term.
Returns
-------
numeric
Output of Power Law relation.
Notes
-----
Power Law relation: :math:`y = a x^b + e`
"""
return a*x**b+e | e736d9bb2e4305ef0dc0a360143a611b805f7612 | 707,838 |
def parse_char(char, invert=False):
"""Return symbols depending on the binary input
Keyword arguments:
char -- binary integer streamed into the function
invert -- boolean to invert returned symbols
"""
    if not invert:
        if char == 0:
            return '.'
        elif char == 1:
            return '@'
    else:
        if char == 0:
            return '@'
        elif char == 1:
            return '.' | 38c0d1c150a1c8e8f7d2f3d1bde08ec3e5ceb65b | 707,841 |
def list_datasets(service, project_id):
"""Lists BigQuery datasets.
Args:
service: BigQuery service object that is authenticated. Example: service = build('bigquery','v2', http=http)
project_id: string, Name of Google project
Returns:
List containing dataset names
"""
datasets = service.datasets()
response = datasets.list(projectId=project_id).execute()
dataset_list = []
for field in response['datasets']:
dataset_list.append(field['datasetReference']['datasetId'])
return dataset_list | 2712e6a99427ce3b141e7948bba36e8e724f82bc | 707,842 |
from typing import List
from typing import Tuple
import bisect
def line_col(lbreaks: List[int], pos: int) -> Tuple[int, int]:
"""
Returns the position within a text as (line, column)-tuple based
on a list of all line breaks, including -1 and EOF.
"""
if not lbreaks and pos >= 0:
return 0, pos
if pos < 0 or pos > lbreaks[-1]: # one character behind EOF is still an allowed position!
raise ValueError('Position %i outside text of length %s !' % (pos, lbreaks[-1]))
line = bisect.bisect_left(lbreaks, pos)
column = pos - lbreaks[line - 1]
return line, column | 6b99e3b19ed1a490e4a9cc284f99e875085f819a | 707,846 |
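A short sketch (my own example) that builds the required line-break list, including -1 and EOF, for a small text:

text = "alpha\nbeta\ngamma"
lbreaks = [-1] + [i for i, c in enumerate(text) if c == "\n"] + [len(text)]
print(line_col(lbreaks, text.index("beta")))  # (2, 1)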
def _rle_decode(data):
"""
Decodes run-length-encoded `data`.
"""
if not data:
return data
new = b''
last = b''
for cur in data:
if last == b'\0':
new += last * cur
last = b''
else:
new += last
last = bytes([cur])
return new + last | 8463ff6a20b3a39df7b67013d47fe81ed6d53477 | 707,850 |
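An illustration of the scheme the decoder implements (as read from the code, not from external documentation): a NUL byte followed by a count byte expands to that many NUL bytes, and every other byte passes through unchanged:

encoded = b"A\x00\x05B"
print(_rle_decode(encoded))  # b'A\x00\x00\x00\x00\x00B'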
def scale_log2lin(value):
"""
Scale value from log10 to linear scale: 10**(value/10)
Parameters
----------
value : float or array-like
Value or array to be scaled
Returns
-------
float or array-like
Scaled value
"""
return 10**(value/10) | 04f15a8b5a86a6e94dd6a0f657d7311d38da5dc0 | 707,851 |
def _error_to_level(error):
"""Convert a boolean error field to 'Error' or 'Info' """
if error:
return 'Error'
else:
return 'Info' | b43e029a4bb14b10de4056758acecebc85546a95 | 707,852 |
def add_review(status):
"""
    Sums the review switch flags on the tracker document.
Input: tracker document.
Output: sum of the switches.
"""
cluster = status['cluster_switch']
classify = status['classify_switch']
replace = status['replace_switch']
final = status['final_switch']
finished = status['finished_switch']
num = cluster + classify + replace + final + finished
return num | 8f2ba4cd8b6bd4e500e868f13733146579edd7ce | 707,853 |
import copy
def _make_reference_filters(filters, ref_dimension, offset_func):
"""
Copies and replaces the reference dimension's definition in all of the filters applied to a dataset query.
This is used to shift the dimension filters to fit the reference window.
:param filters:
:param ref_dimension:
:param offset_func:
:return:
"""
reference_filters = []
for ref_filter in filters:
if ref_filter.field is ref_dimension:
# NOTE: Important to apply the offset function to the start and stop properties because the date math can
# become expensive over many rows
ref_filter = copy.copy(ref_filter)
ref_filter.start = offset_func(ref_filter.start)
ref_filter.stop = offset_func(ref_filter.stop)
reference_filters.append(ref_filter)
return reference_filters | eeeeb74bb3618c87f3540de5b44970e197885dc6 | 707,858 |
def filter_pdf_files(filepaths):
""" Returns a filtered list with strings that end with '.pdf'
Keyword arguments:
filepaths -- List of filepath strings
"""
return [x for x in filepaths if x.endswith('.pdf')] | 3f44b3af9859069de866cec3fac33a9e9de5439d | 707,861 |
def istype(klass, object):
"""Return whether an object is a member of a given class."""
try: raise object
except klass: return 1
except: return 0 | bceb83914a9a346c59d90984730dddb808bf0e78 | 707,865 |
import re
def text_cleanup(text: str) -> str:
"""
A simple text cleanup function that strips all new line characters and
substitutes consecutive white space characters by a single one.
:param text: Input text to be cleaned.
:return: The cleaned version of the text
"""
    text = text.replace('\n', '')
return re.sub(r'\s{2,}', ' ', text) | 84b9752f261f94164e2e83b944a2c12cee2ae5d8 | 707,867 |
from typing import OrderedDict
from typing import Counter
def profile_nominal(pairs, **options):
"""Return stats for the nominal field
Arguments:
:param pairs: list with pairs (row, value)
:return: dictionary with stats
"""
result = OrderedDict()
values = [r[1] for r in pairs]
c = Counter(values)
result['top'], result['freq'] = c.most_common(1)[0]
categories = list(c)
categories.sort()
result['categories'] = categories
result['categories_num'] = len(categories)
return result | 00ef211e8f665a02f152e764c409668481c748cc | 707,870 |
def compute( op , x , y ):
"""Compute the value of expression 'x op y', where -x and y
are two integers and op is an operator in '+','-','*','/'"""
if (op=='+'):
return x+y
elif op=='-':
return x-y
elif op=='*':
return x*y
elif op=='/':
return x/y
else:
return 0 | dbdf73a91bdb7092d2a18b6245ce6b8d75b5ab33 | 707,878 |
def get_indentation(line_):
"""
returns the number of preceding spaces
"""
return len(line_) - len(line_.lstrip()) | 23a65ba620afa3268d4ab364f64713257824340d | 707,880 |
from typing import List
from typing import Any
from typing import Optional
def jinja_calc_buffer(fields: List[Any], category: Optional[str] = None) -> int:
"""calculate buffer for list of fields based on their length"""
if category:
fields = [f for f in fields if f.category == category]
return max(len(f.to_string()) for f in fields) | c1f619acd8f68a9485026b344ece0c162c6f0fb0 | 707,882 |
def get_delete_op(op_name):
""" Determine if we are dealing with a deletion operation.
Normally we just do the logic in the last return. However, we may want
special behavior for some types.
:param op_name: ctx.operation.name.split('.')[-1].
:return: bool
"""
return 'delete' == op_name | 508a9aad3ac6f4d58f5890c1abc138326747ee51 | 707,883 |
def _mysql_int_length(subtype):
"""Determine smallest field that can hold data with given length."""
try:
length = int(subtype)
except ValueError:
raise ValueError(
'Invalid subtype for Integer column: {}'.format(subtype)
)
if length < 3:
kind = 'TINYINT'
elif length < 4:
kind = 'SMALLINT'
elif length < 7:
kind = 'MEDIUMINT'
elif length <= 10:
kind = 'INT'
else:
kind = 'BIGINT'
return '{}({})'.format(kind, length) | 3a0e84a3ac602bb018ae7056f4ad06fe0dcab53b | 707,885 |
def regularity(sequence):
"""
Compute the regularity of a sequence.
The regularity basically measures what percentage of a user's
visits are to a previously visited place.
Parameters
----------
sequence : list
A list of symbols.
Returns
-------
float
1 minus the ratio between unique and total symbols in the sequence.
"""
n = len(sequence)
n_unique = len(set(sequence))
if n_unique <= 1:
return 1.0
if n_unique == n:
return .0
return 1 - (n_unique / n) | e03d38cc3882ea5d0828b1f8942039865a90d49d | 707,886 |
def contains_whitespace(s : str):
"""
    Returns True if the string contains a space or tab character.
"""
return " " in s or "\t" in s | c5dc974988efcfa4fe0ec83d115dfa7508cef798 | 707,887 |
import math
def divide_list(l, n):
"""Divides list l into n successive chunks."""
length = len(l)
chunk_size = int(math.ceil(length/n))
expected_length = n * chunk_size
chunks = []
for i in range(0, expected_length, chunk_size):
chunks.append(l[i:i+chunk_size])
for i in range(len(chunks), n):
chunks.append([])
return chunks | bad7c118988baebd5712cd496bb087cd8788abb7 | 707,888 |
def scheming_field_by_name(fields, name):
"""
Simple helper to grab a field from a schema field list
based on the field name passed. Returns None when not found.
"""
for f in fields:
if f.get('field_name') == name:
return f | ba4d04585b12ab941db8bc0787b076c32e76cadb | 707,894 |
def create_C1(data_set):
"""
    Create the frequent candidate 1-itemset C1 by scanning the data set.
Args:
data_set: A list of transactions. Each transaction contains several items.
Returns:
C1: A set which contains all frequent candidate 1-itemsets
"""
C1 = set()
for t in data_set:
for item in t:
item_set = frozenset([item])
C1.add(item_set)
return C1 | 9f3deb61c6c3b982976c61c4247102431794daa8 | 707,898 |
def line_break(text, line_len=79, indent=1):
"""
Split some text into an array of lines.
Enter: text: the text to split.
line_len: the maximum length of a line.
indent: how much to indent all but the first line.
Exit: lines: an array of lines.
"""
lines = [text.rstrip()]
while len(lines[-1]) > line_len:
pos = lines[-1].rfind(' ', 0, line_len)
if pos < 0:
pos = line_len
lines[-1:] = [lines[-1][:pos].rstrip(), ' '*indent+lines[-1][
pos:].strip()]
return lines | 34b866109689796a4d428e7d3a68a34f7152250f | 707,900 |
def altsumma(f, k, p):
"""Return the sum of f(i) from i=k, k+1, ... till p(i) holds true or 0.
This is an implementation of the Summation formula from Kahan,
see Theorem 8 in Goldberg, David 'What Every Computer Scientist
Should Know About Floating-Point Arithmetic', ACM Computer Survey,
Vol. 23, No. 1, March 1991."""
if not p(k):
return 0
else:
S = f(k)
C = 0
j = k + 1
while p(j):
Y = f(j) - C
T = S + Y
C = (T - S) - Y
S = T
j += 1
return S | 952e77fcedfbe01658342126d95b79175c082976 | 707,902 |
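A minimal sketch: compensated (Kahan) summation of 1/i**2 for i = 1..10000, which should approach pi**2/6:

s = altsumma(lambda i: 1.0 / (i * i), 1, lambda i: i <= 10_000)
print(s)  # roughly 1.64483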
def word_sorter(x):
"""
    Function to sort word/frequency pairs by frequency.
    Lowest-frequency collocates come first, highest-frequency collocates last.
"""
# getting length of list of word/frequency pairs
lst = len(x)
# sort by frequency
for i in range(0, lst):
for j in range(0, lst-i-1):
if (x[j][1] > x[j + 1][1]):
temp = x[j]
x[j]= x[j + 1]
x[j + 1] = temp
return(x) | 571570bb03d6473b9c6839aa6fdc0b1ba8efbe3c | 707,903 |
def GenerateConfig(_):
"""Returns empty string."""
return '' | ed42eb1c320ca1df25603a53d4abf4a1b14215f3 | 707,906 |
def get_maya_property_name(prop, ignore_channel=False):
"""
Given a property, return a reasonable Maya name to use for it.
If ignore_channel is True, return the property for the whole vector, eg. return
'.translate' instead of '.translateX'.
This doesn't create or query anything. It just generates a name to use elsewhere.
"""
prop_parts = prop.path.split('/')
# Get the property key, without any channel suffixes attached.
prop_key = prop_parts[0]
mapping = {
'translation': 'translate',
'rotation': 'rotate',
'scale': 'scale',
}
maya_key = None
if prop_key in mapping:
prop_key = mapping[prop_key]
if prop.path.count('/') == 1 and not ignore_channel:
# If we've been given a single channel, eg. rotation/x, return it.
assert len(prop_parts) == 2, prop_parts
assert prop_parts[1] in ('x', 'y', 'z'), prop_parts
return '%s%s' % (prop_key, prop_parts[1].upper())
else:
# Otherwise, return the vector itself.
return prop_key | 591a49f054db3936d5a345919a2c69491b6f345e | 707,911 |
def prefix_sums(A):
"""
    This function calculates sums of elements in given slices (contiguous segments of an array).
    Its main idea uses prefix sums, which
    are defined as the consecutive totals of the first 0, 1, 2, . . . , n elements of an array.
    Args:
        A: an array representing the number of mushrooms growing on the
        consecutive spots along a road.
Returns:
an array contains the consecutive sums of the first n elements of an array A
To use:
>> A=[2,3,7,5,1,3,9]
>> print(prefix_sums(A))
[0, 2, 5, 12, 17, 18, 21, 30]
Time Complexity: O(n)
"""
n = len(A)
P = [0] * (n + 1)
for k in range(1, n + 1):
P[k] = P[k - 1] + A[k - 1]
return P | d61e49eb4a973f7718ccef864d8e09adf0e09ce2 | 707,913 |
def sql2dict(queryset):
"""Return a SQL alchemy style query result into a list of dicts.
Args:
queryset (object): The SQL alchemy result.
Returns:
result (list): The converted query set.
"""
if queryset is None:
return []
return [record.__dict__ for record in queryset] | c55fa18773142cca591aac8ed6bdc37657569961 | 707,916 |
def float_to_bin(x, m_digits:int):
"""
Convert a number x in range [0,1] to a binary string truncated to length m_digits
arguments:
x: float
m_digits: integer
return:
x_bin: string
The decimal representation of digits AFTER '0.'
Ex:
Input 0.75 has binary representation 0.11
Then this function would return '11'
"""
if x < 0 or x >= 1:
raise ValueError("x must be in interval [0,1)")
x_round = round(x * 2**m_digits)
# print(x_round)
# print(2**m_digits)
if x_round == 2**m_digits:
x_round = 0
x_raw = bin(x_round)
x_bin = x_raw[2:].zfill(m_digits)
return x_bin | f95e72d9449b66681575b230f6c858e8b3833cc2 | 707,917 |
from typing import Callable
from typing import List
def apply(func: Callable, args: List):
"""Call `func` expanding `args`.
Example:
>>> def add(a, b):
        ...     return a + b
>>> apply(add, [1, 2])
3
"""
return func(*args) | f866087d07c7c036b405f8d97ba993f12c392d76 | 707,918 |
def rhs_of_rule(rule):
""" This function takes a grammatical rule, and returns its RHS """
return rule[0] | 004b99ac97c50f7b33cc798997463a28c3ae9a6f | 707,922 |
def to_bin(val):
"""
Receive int and return a string in binary. Padded by 32 bits considering 2's complement for negative values
"""
COMMON_DIGITS = 32
val_str = "{:b}".format(val) # Count '-' in negative case
padded_len = len(val_str) + ((COMMON_DIGITS - (len(val_str) % COMMON_DIGITS)) % COMMON_DIGITS)
if val < 0:
val_2_complement = val & ((1 << padded_len) - 1)
final_val_str = "{:b}".format(val_2_complement)
else:
final_val_str = "0" * (padded_len - len(val_str)) + val_str
return(final_val_str) | 819d1c0a9d387f6ad1635f0fe0e2ab98b3ca17b0 | 707,923 |
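Two quick checks (my own examples) showing the 32-bit padding and two's-complement handling:

print(to_bin(5))   # '00000000000000000000000000000101'
print(to_bin(-1))  # '11111111111111111111111111111111'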
def rgetattr(obj, attr):
"""
Get named attribute from an object, i.e. getattr(obj, 'a.a') is
equivalent to ``obj.a.a''.
- obj: object
- attr: attribute name(s)
>>> class A: pass
>>> a = A()
>>> a.a = A()
>>> a.a.a = 1
>>> rgetattr(a, 'a.a')
1
>>> rgetattr(a, 'a.c')
Traceback (most recent call last):
...
AttributeError: 'A' object has no attribute 'c'
"""
attrs = attr.split(".")
obj = getattr(obj, attrs[0])
for name in attrs[1:]:
obj = getattr(obj, name)
return obj | 5fb58634c4ba910d0a20753c04addf667614a07f | 707,925 |
def production(*args):
"""Creates a production rule or list of rules from the input.
Supports two kinds of input:
A parsed string of form "S->ABC" where S is a single character, and
ABC is a string of characters. S is the input symbol, ABC is the output
symbols.
Neither S nor ABC can be any of the characters "-", ">" for obvious
reasons.
A tuple of type (S, Seq, ...) where S is the symbol of some hashable
type and seq is an finite iterable representing the output symbols.
Naturally if you don't want to use characters/strings to represent
symbols then you'll typically need to use the second form.
You can pass multiple inputs to generate multiple production rules,
in that case the result is a list of rules, not a single rule.
If you pass multiple inputs the symbol must differ since a simple
L-System only supports one production rule per symbol.
Example:
>>> production("F->Ab[]")
('F', ['A', 'b', '[', ']'])
>>> production("F->Ab[]", ("P", "bAz"), (1, (0,1)))
[('F', ['A', 'b', '[', ']']), ('P', ['b', 'A', 'z']), (1, [0, 1])]
"""
if len(args) < 1:
raise ValueError("missing arguments")
res = []
for a in args:
if issubclass(str, type(a)):
parts = a.split(sep="->", maxsplit=1)
if len(parts) < 2:
raise ValueError("couldn't parse invalid string \"{}\"".format(a))
res.append((parts[0], list(parts[1])))
elif issubclass(tuple, type(a)):
s, to, *vals = a
res.append((s, list(to)))
else:
raise TypeError("sorry don't know what to do with " + str(type(a)))
if len(res) == 1:
return res[0]
return res | bcb3e415a283f654ab65e0656a3c7e3912eeb53b | 707,929 |
def flatten(items):
"""Convert a sequence of sequences to a single flat sequence.
Works on dictionaries, tuples, lists.
"""
result = []
for item in items:
if isinstance(item, list):
result += flatten(item)
else:
result.append(item)
return result | d44e3391f791dfd2ec9b323c37c510a415bb23bf | 707,930 |
def _is_src(file):
""" Returns true if the file is a source file
Bazel allows for headers in the srcs attributes, we need to filter them out.
Args:
file (File): The file to check.
"""
if file.extension in ["c", "cc", "cpp", "cxx", "C", "c++", "C++"] and \
file.is_source:
return True
return False | b0466073d4d1b05c5cab37946fb6ca8432dc752d | 707,934 |
def calculate_signal_strength(rssi):
# type: (int) -> int
"""Calculate the signal strength of access point."""
signal_strength = 0
if rssi >= -50:
signal_strength = 100
else:
signal_strength = 2 * (rssi + 100)
return signal_strength | d5a0955446e0fe0548639ddd1a849f7e7901c36b | 707,935 |
def lookAtThisMethod(
first_parameter,
second_paramter=None,
third_parameter=32,
fourth_parameter="a short string as default argument",
**kwargs
):
"""The point of this is see how it reformats parameters
It might be fun to see what goes on
Here I guess it should respect this spacing, since we are in a comment.
We are done!
"""
return kwargs["whatever"](
first_parameter * third_parameter,
second_paramter,
fourth_parameter,
"extra string because I want to",
) | 8dab028b40184bb7cf686c524d5abd452cee2bc3 | 707,936 |
def coord_for(n, a=0, b=1):
"""Function that takes 3 parameters or arguments, listed above, and returns a list of the interval division coordinates."""
a=float(a)
b=float(b)
coords = []
inc = (b-a)/ n
for x in range(n+1):
coords.append(a+inc*x)
return coords | 57e12200dcc113786c9deeb4865d7906d74c763f | 707,940 |
from typing import List
def init_anim() -> List:
"""Initialize the animation."""
return [] | 121fff8b4102c2961449d970307e762bd983bdbe | 707,947 |
def keep_digits(txt: str) -> str:
"""Discard from ``txt`` all non-numeric characters."""
return "".join(filter(str.isdigit, txt)) | 34387003ea03651dd2582b3c49f1095c5589167b | 707,948 |
import six
def validate_hatch(s):
"""
Validate a hatch pattern.
A hatch pattern string can have any sequence of the following
characters: ``\\ / | - + * . x o O``.
"""
if not isinstance(s, six.text_type):
raise ValueError("Hatch pattern must be a string")
unique_chars = set(s)
unknown = (unique_chars -
set(['\\', '/', '|', '-', '+', '*', '.', 'x', 'o', 'O']))
if unknown:
raise ValueError("Unknown hatch symbol(s): %s" % list(unknown))
return s | 4ddf056dab2681759a462005effc4ae5488a4461 | 707,950 |
def filter_example(config, example, mode="train"):
"""
Whether filter a given example according to configure.
:param config: config contains parameters for filtering example
:param example: an example instance
:param mode: "train" or "test", they differs in filter restrictions
:return: boolean
"""
if mode == "train":
return (len(example["ans_sent_tokens"]) > config.sent_limit or
len(example["ques_tokens"]) > config.ques_limit or
(example["y2_in_sent"] - example["y1_in_sent"]) >
config.ans_limit)
elif mode == "test":
return (len(example["ans_sent_tokens"]) > config.sent_limit or
len(example["ques_tokens"]) > config.ques_limit)
else:
print("mode must be train or test") | 9c49990fe36c0a82d0a99a62fe810a19cd5a8749 | 707,951 |
def _dict_flatten(data):
"""Return flattened dict of input dict <data>.
After https://codereview.stackexchange.com/revisions/21035/3
Parameters
----------
data : dict
Input dict to flatten
Returns
-------
fdata : dict
Flattened dict.
"""
def expand(key, value):
"""Expand list."""
if isinstance(value, dict):
return [(key+'>'+k, v) for k, v in _dict_flatten(value).items()]
else:
return [(key, value)]
return dict([item for k, v in data.items() for item in expand(k, v)]) | a1db4a552ced44efa45fe4f86fbfe04871463356 | 707,952 |
def _get_item(i, j, block):
"""
Returns a single item from the block. Coords must be in block space.
"""
return block[i, j] | 45a12ecb3959a75ad8f026616242ba64174441fc | 707,953 |
def check_all_rows(A):
"""
Check if all rows in 2-dimensional matrix don't have more than one queen
"""
for row_inx in range(len(A)):
# compute sum of row row_inx
if sum(A[row_inx]) > 1:
return False
return True | e39f4ca3e401c02b13c5b55ed4389a7e6deceb40 | 707,954 |
def get_experiment_type(filename):
"""
Get the experiment type from the filename.
The filename is assumed to be in the form of:
'<reliability>_<durability>_<history kind>_<topic>_<timestamp>'
:param filename: The filename to get the type.
:return: A string where the timesptamp is taken out from the filename.
"""
file_type = ''
filename = filename.split('/')[-1]
elements = filename.split('_')
for i in range(0, len(elements) - 3):
file_type += '{}_'.format(elements[i])
file_type = file_type[:-1]
return file_type | e1853a95d034b8f9e36ca65f6f5d200cbf4b86dc | 707,957 |
import pytz
def getAwareTime(tt):
"""
Generates timezone aware timestamp from timezone unaware timestamp
PARAMETERS
------------
    :param tt: datetime
        timezone-unaware (naive) timestamp
    RETURNS
    ------------
    :return: datetime
        timezone-aware timestamp
"""
timezone = pytz.timezone("Europe/Amsterdam")
return (timezone.localize(tt)) | 1b286c92c7f5d8f0ff48d77296489fbd358c14ce | 707,958 |
def b64pad(b64data):
"""Pad base64 string with '=' to achieve a length that is a multiple of 4
"""
    # Pad only when the length is not already a multiple of 4.
    return b64data + '=' * (-len(b64data) % 4) | bdc14821bfbdbf220ff371fbe5e486d3e682337b | 707,965 |
def parse_copy_core_dump(raw_result):
"""
Parse the 'parse_copy_core_dump' command raw output.
:param str raw_result: copy core-dump raw result string.
:rtype: dict
:return: The parsed result of the copy core-dump to server:
::
{
0:{
'status': 'success'
'reason': 'core dump copied'
}
}
"""
if "Error code " in raw_result:
return {"status": "failed", "reason": "Error found while coping"}
if "No coredump found for" in raw_result:
return {"status": "failed", "reason": "no core dump found"}
if "Failed to validate instance ID" in raw_result:
return {"status": "failed", "reason": "instance ID not valid"}
if "ssh: connect to host" in raw_result:
return {"status": "failed", "reason": "ssh-connection issue for SFTP"}
if (
"copying ..." in raw_result and
"Sent " in raw_result and
"bytes" in raw_result and
"seconds" in raw_result
):
return {"status": "success", "reason": "core dump copied"}
else:
return {"status": "failed", "reason": "undefined error"} | 4ce168c9bc8c462ecc36beba889adb36cc64135d | 707,966 |
def compute_lifting_parameter(lamb, lambda_plane_idxs, lambda_offset_idxs, cutoff):
"""One way to compute a per-particle "4D" offset in terms of an adjustable lamb and
constant per-particle parameters.
Notes
-----
(ytz): this initializes the 4th dimension to a fixed plane adjust by an offset
followed by a scaling by cutoff.
lambda_plane_idxs are typically 0 or 1 and allows us to turn off an interaction
independent of the lambda value.
lambda_offset_idxs are typically 0 and 1, and allows us to adjust the w coordinate
in a lambda-dependent way.
"""
w = cutoff * (lambda_plane_idxs + lambda_offset_idxs * lamb)
return w | a9455ed67fcb21bcf1382fe66a77e0563f467421 | 707,967 |
def endgame_score_connectfour(board, is_current_player_maximizer) :
"""Given an endgame board, returns 1000 if the maximizer has won,
-1000 if the minimizer has won, or 0 in case of a tie."""
chains_1 = board.get_all_chains(current_player=is_current_player_maximizer)
chains_2 = board.get_all_chains(current_player= not(is_current_player_maximizer))
for chain in chains_1:
if len(chain) == 4:
return 1000
for chain in chains_2:
if len(chain) == 4:
return -1000
return 0 | bcb37381a9633377cb3405fbae45123e2a391df9 | 707,969 |
import copy
def identify_all_failure_paths(network_df_in,edge_failure_set,flow_dataframe,path_criteria):
"""Identify all paths that contain an edge
Parameters
---------
network_df_in - Pandas DataFrame of network
edge_failure_set - List of string edge ID's
flow_dataframe - Pandas DataFrame of list of edge paths
path_criteria - String name of column of edge paths in flow dataframe
Outputs
-------
network_df - Pandas DataFrame of network
With removed edges
edge_path_index - List of integer indexes
Of locations of paths in flow dataframe
"""
edge_path_index = []
network_df = copy.deepcopy(network_df_in)
for edge in edge_failure_set:
network_df = network_df[network_df.edge_id != edge]
edge_path_index += flow_dataframe.loc[flow_dataframe[path_criteria].str.contains(
"'{}'".format(edge))].index.tolist()
edge_path_index = list(set(edge_path_index))
return network_df, edge_path_index | db2da6ad20a4ae547c309ac63b6e68a17c3874e7 | 707,970 |
def load_from_input_flags(params, params_source, input_flags):
"""Update params dictionary with input flags.
Args:
params: Python dictionary of hyperparameters.
params_source: Python dictionary to record source of hyperparameters.
input_flags: All the flags with non-null value of overridden
hyperparameters.
Returns:
Python dict of hyperparameters.
"""
if params is None:
raise ValueError(
'Input dictionary is empty. It is expected to be loaded with default '
'values')
if not isinstance(params, dict):
raise ValueError(
'The base parameter set must be a Python dict, was: {}'.format(
type(params)))
for key in params:
flag_value = input_flags.get_flag_value(key, None)
if flag_value is not None:
params[key] = flag_value
params_source[key] = 'Command-line flags'
return params, params_source | 7ec8662f03469f1ed03f29c9f7e9663c49aa7056 | 707,972 |
def kml_start(params):
"""Define basic kml
header string"""
kmlstart = '''
<Document>
<name>%s</name>
<open>1</open>
<description>%s</description>
'''
return kmlstart % (params[0], params[1]) | c2fa4c1eeff086dfc3baa41ecd067634920b25b1 | 707,975 |
import math
def calc_distance(p1, p2):
""" calculates a distance on a 2d euclidean space, between two points"""
dist = math.sqrt((p2[0] - p1[0]) ** 2 + (p2[1] - p1[1]) ** 2)
return dist | d4005d44d5724c051860fb9aa2edeab1654157c6 | 707,980 |