content
stringlengths
39
14.9k
sha1
stringlengths
40
40
id
int64
0
710k
def get_printer(msg):
    """Return a printer function that prints information about a tensor's
    gradient. Used by register_hook in the backward pass.

    :param msg: label prepended to every printed line
    :return: printer function suitable for ``tensor.register_hook``
    """
    def printer(tensor):
        # Bug fix: ``nelement`` is a method, not an attribute; the original
        # compared the bound method itself to 1, which is always False, so
        # the single-element branch was unreachable.
        if tensor.nelement() == 1:
            print("{} {}".format(msg, tensor))
        else:
            print("{} shape: {}"
                  "max: {} min: {}"
                  "mean: {}"
                  .format(msg, tensor.shape, tensor.max(),
                          tensor.min(), tensor.mean()))
    return printer
0aefa7449452f791d0c6a15326044a0171c7faf4
18,660
def remove_protocol(addr):
    """Strip a leading protocol prefix (everything up to the first '://')
    from `addr`.

    Parameters
    ----------
    addr : str
        The address from which to remove the protocol prefix.

    Returns
    -------
    str
        `addr` without its protocol prefix, or `addr` unchanged when no
        '://' separator is present.
    """
    # maxsplit=1 so only the first occurrence of '://' is removed.
    parts = addr.split("://", 1)
    if len(parts) == 2:
        return parts[1]
    return addr
f59c702be333671da3e9f202581acf6db3082c50
18,662
def find_duplicates(_list):
    """Return the set of items that occur more than once in `_list`.

    Single pass: an item already in `seen` on arrival is a duplicate.
    ref: https://www.iditect.com/guide/python/python_howto_find_the_duplicates_in_a_list.html

    :arg list _list: a python list
    """
    seen = set()
    mark_seen = seen.add  # bind once; add() returns None, so it is falsy below
    return {item for item in _list if item in seen or mark_seen(item)}
ed1ab9020d41b608dd6b8d3ce9e2448a096b1574
18,663
def first(iterable, default=None, key=None):
    """Return the first element of `iterable` that is truthy (or for which
    `key` returns a truthy value); `default` when none qualifies.

    Shamelessly stolen from https://github.com/hynek/first
    """
    predicate = bool if key is None else key
    for element in iterable:
        if predicate(element):
            return element
    return default
fd3eeeecf88b0dd29b3f689f61732e6502306016
18,664
def read_txt_file(file_name: str, encoding: str = "utf-8") -> str:
    """Read the entire contents of a text file.

    :param file_name: path to the file
    :param encoding: text encoding used to decode the file
    :return: file contents as a single string
    """
    with open(file_name, "r", encoding=encoding) as handle:
        content = handle.read()
    return content
f97bd162596569b521121bd17da3edfdc04889d8
18,666
from typing import Dict


def prepare_note_create_output(record: Dict) -> Dict:
    """Prepare context output for the note-create command.

    :param record: Dict containing note record.
    :return: prepared context output Dict with missing fields as ''.
    """
    keys = ('Id', 'WorkInfoType', 'ViewAccess', 'Summary', 'Submitter',
            'srId', 'Notes', 'ModifiedDate', 'CreatedDate')
    return {key: record.get(key, '') for key in keys}
a0cc6f71dae7527f2c8088d349b79921f163d2d7
18,667
def query_interval_tree_by_type(interval_tree, time, type):
    """Return only the items of the given `type` found in `interval_tree`
    at the given `time`.
    """
    # Each interval's payload is its last field (also exposed as .data).
    return {interval.data
            for interval in interval_tree[time]
            if isinstance(interval[-1], type)}
68366bfe9413cdecd8b3551b8070d1cc30646358
18,668
import re


def parse_question_limits(question, for_items=False):
    """
    Converts word and character length validators into JSON Schema-compatible
    maxLength and regex validators.

    :param question: mapping with optional 'validations', 'max_length',
        'max_length_in_words' and 'optional' entries
    :param for_items: when True, the optional-question empty-match pattern
        alternative is not emitted
    :return: dict with optional 'maxLength' and/or 'pattern' keys
    """
    limits = {}
    # First validator named like 'under_N_words' (None when absent).
    word_length_validator = next(
        iter(filter(None, (
            re.match(r'under_(\d+)_words', validator['name'])
            for validator in question.get('validations', [])
        ))),
        None
    )
    # First digit group (possibly comma-separated) in the message of an
    # 'under_character_limit' validator (None when absent).
    char_length_validator = next(
        iter(filter(None, (
            re.search(r'([\d,]+)', validator['message'])
            for validator in question.get('validations', [])
            if validator['name'] == 'under_character_limit'
        ))),
        None
    )
    # Explicit 'max_length' on the question wins over the validator-derived
    # value; thousands separators are stripped before int conversion below.
    char_length = question.get('max_length') or (
        char_length_validator and char_length_validator.group(1).replace(',', '')
    )
    word_length = question.get('max_length_in_words') or (word_length_validator and word_length_validator.group(1))
    if char_length:
        limits['maxLength'] = int(char_length)
    if word_length:
        # Pattern allows up to word_length whitespace-separated tokens; the
        # optional-question variant also accepts the empty string.
        if not for_items and question.get('optional'):
            limits['pattern'] = r"^$|(^(?:\S+\s+){0,%s}\S+$)" % (int(word_length) - 1)
        else:
            limits['pattern'] = r"^(?:\S+\s+){0,%s}\S+$" % (int(word_length) - 1)
    return limits
3b9400f27755e5f93de52665d7e8f9d209ad5e30
18,672
def cm2inch(*tupl, scale=3):
    """Convert measures given in cm to inches, multiplied by `scale`.

    Parameters:
        *tupl: *tuple(floats)
            Measures in cm to convert and scale; may also be passed as a
            single tuple argument.
        scale: float
            Scale factor. Default: 3
    """
    cm_per_inch = 2.54
    # Accept either cm2inch(a, b, ...) or cm2inch((a, b, ...)).
    values = tupl[0] if isinstance(tupl[0], tuple) else tupl
    return tuple(scale * v / cm_per_inch for v in values)
d84b3e60ac8a7ae2f46818510d224923dc39c730
18,673
def get_id_data_abstract_role_mappings(id_data):
    """Get the logical-to-physical access-control role name mapping stored
    in a custom resource's id data.

    :param id_data: Data extracted from a custom resource's physical id by
        get_data_from_custom_physical_resource_id
    :returns: dictionary mapping abstract role names to physical role names;
        an empty dict is inserted into `id_data` and returned when absent.
    """
    if 'AbstractRoleMappings' not in id_data:
        id_data['AbstractRoleMappings'] = {}
    return id_data['AbstractRoleMappings']
d93acd5988f068ed5a203e811326c33ab5f6b8aa
18,674
def normalizeName(name):
    """Make a normalized user name.

    Prevents impersonating another user with names containing leading,
    trailing or multiple whitespace, or using invisible unicode characters.
    Prevents creating a user page as a sub page, because '/' is not allowed
    in user names. Prevents using ':' and ',' which are reserved by acl.

    @param name: user name, unicode
    @rtype: unicode
    @return: user name that can be used in acl lines
    """
    allowed = "'@.-_"  # ' for names like O'Brian or email addresses.
    # "," and ":" must not be allowed (ACL delimiters); _ is kept for nicer URLs.
    # Drop everything that is not alphanumeric, whitespace, or allowed.
    kept = [ch for ch in name if ch.isalnum() or ch.isspace() or ch in allowed]
    # Collapse whitespace runs to single spaces and strip the ends.
    return ' '.join(''.join(kept).split())
48ce83e2aef5e4bce993edd0e4cba230c2006641
18,683
def convert_to_int(byte_arr):
    """Convert an array of byte chunks into an array of integers.

    Each element of `byte_arr` is interpreted as a big-endian integer;
    a constant term 1 is prepended to the result.
    """
    result = [int.from_bytes(chunk, byteorder='big') for chunk in byte_arr]
    # Prepend the free (constant) term.
    result.insert(0, 1)
    return result
03e7095dcf4d49e78ffa406e49b217e30bef67f4
18,684
def insertion_sort(lst):
    """
    Sorts list using insertion sort (in place; `lst` is mutated).

    :param lst: list of unsorted elements
    :return comp: number of comparisons
    """
    comp = 0
    for i in range(1, len(lst)):
        key = lst[i]
        j = i - 1
        cur_comp = 0
        # Shift larger elements right until key's slot is found; each shift
        # corresponds to one successful `key < lst[j]` comparison.
        while j >= 0 and key < lst[j]:
            lst[j + 1] = lst[j]
            j -= 1
            cur_comp += 1
        comp += cur_comp
        # When no shift happened, count the single failed comparison.
        # NOTE(review): when shifts DID happen and the loop then stopped on a
        # failed `key < lst[j]` test, that final comparison is not counted —
        # confirm this counting convention is intended.
        if cur_comp == 0:
            comp += 1
        lst[j + 1] = key
    return comp
37b48a6f35828dfe88d6e329f93cec44b557136c
18,685
def error_rate(error_count, total):
    """Calculate the percentage error rate.

    Args:
        error_count (int): Number of errors.
        total (int): Total number of words (of the same type).

    Returns:
        tuple (int, int, float): The error count, the total number of words,
        and the error rate in percent (0.0 when `total` is zero, avoiding a
        division by zero).
    """
    rate = 0.0 if total == 0 else (error_count / total) * 100
    return error_count, total, rate
92f9f10952f86087edf251ccaf3f1005fdd6cb57
18,690
def _runCrossValidate(fitter):
    """
    Top level function that runs crossvalidation for a fitter.
    Kept at module level so it can be pickled for parallel processing.

    Parameters
    ----------
    fitter: AbstractFitter
        Fitter whose ``fit()`` and ``score()`` are invoked.

    Returns
    -------
    lmfit.Parameters, score
        The fitted parameters and the resulting score.
    """
    fitter.fit()
    score = fitter.score()
    return fitter.parameters, score
6dccc89e3ca43d98f417caf0c66d7581fcd77b31
18,691
def markdown_format(input_data) -> str:
    """
    Format input into nice markdown accordingly

    Dict -> formatted list of *key*: value pairs
    Everything Else -> Str

    Args:
        input_data (mixed):

    Returns:
        str:
    """
    if isinstance(input_data, dict):
        return "\n".join("*{}*: {}".format(k, v) for k, v in input_data.items())
    # Bug fix: coerce to str so the annotated return type (and the
    # docstring's "Everything Else -> Str") actually holds for non-string
    # inputs; str inputs are returned unchanged.
    return str(input_data)
d8602b3dce84504fcf6ef0bfaa6cc1b357bb0548
18,692
import pytz


def dt_to_ms(dt):
    """Convert a datetime to a POSIX timestamp in milliseconds.

    Naive datetimes are interpreted as UTC.
    """
    aware = dt if dt.tzinfo is not None else dt.replace(tzinfo=pytz.UTC)
    return int(aware.timestamp() * 1000)
aa3050ce15e09b9c1ddeb1edbda8c6e4275f3ce6
18,693
def strip_data(df):
    """Remove unused columns from the data (in place) and return the frame."""
    unused = [' dwpf', ' relh', ' drct', ' sknt', ' p01i', ' alti',
              ' mslp', ' vsby', ' gust', ' skyc1', ' skyc2', ' skyc3',
              ' skyc4', ' skyl1', ' skyl2', ' skyl3', ' skyl4',
              ' presentwx', ' metar']
    for column in unused:
        del df[column]
    return df
127c472d0b56c9360d2ac05715f1e614ab735941
18,694
def get_url(year_of_study, session):
    """Build the UTM timetable query URL.

    :param year_of_study: 1, 2, 3 or 4.
    :param session: Examples: 20199 is fall 2019. 20195 is summer 2019.
    :return: full timetable URL as a string.
    """
    base = "https://student.utm.utoronto.ca/timetable/timetable"
    return (f"{base}?yos={year_of_study}&subjectarea=&session={session}"
            f"&courseCode=&sname=&delivery=&courseTitle=")
371e1ad7b1d0d147ad906fd5f7fedd4d7d1f825d
18,698
def django_user_conversion(obj):
    """Convert a Django user.

    Returns the value of the attribute named by the object's
    ``USERNAME_FIELD`` when the object declares one, otherwise falls back
    to ``str(obj)``.
    """
    if hasattr(obj, "USERNAME_FIELD"):
        return getattr(obj, obj.USERNAME_FIELD, None)
    return str(obj)
9e42f35f28799f732840a671ce9c16b67583ae19
18,702
def add_suffix(fname, suffix):
    """Add a suffix to a file name, before its extension.

    Splits on the LAST dot, so names containing several dots
    (e.g. ``archive.tar.gz``) are handled; the original ``split(".")``
    raised ValueError for those.

    :param fname: file name containing at least one dot
    :param suffix: text inserted (with a '_') before the extension
    :return: ``name_suffix.extension``
    """
    name, extension = fname.rsplit(".", 1)
    return name + "_" + suffix + "." + extension
53e8772bd5b974635010974d6373fbf5816ae520
18,709
def pos_pow(z, k):
    """Return z**k when k is non-negative, otherwise 0.0."""
    return z**k if k >= 0 else 0.0
ba0a46e8711005b9ac0dbd603f600dfaa67fbdd4
18,713
def getNPoly(object):
    """Return the number of polygons in the object.

    The polygon container is the object's second element.
    """
    polygons = object[1]
    return len(polygons)
01a2808768d34c7ad31285100ee8c5440092a19f
18,714
def sort_user_links(links):
    """Sort the user's social/contact links case-insensitively by label.

    Args:
        links (list): User's links as (label, url) pairs.

    Returns:
        Sorted list of link tuples.
    """
    def label_key(pair):
        return pair[0].lower()

    return sorted(map(tuple, links), key=label_key)
1f9eb06af28def57c019fb026d9149f3f6f367b3
18,718
def deep_round(A, ndigits=5):
    """Round every number in a list of lists to `ndigits` digits.

    Useful for approximate equality testing.
    """
    rounded = []
    for row in A:
        rounded.append([round(value, ndigits) for value in row])
    return rounded
cebfb6b2dbe83bcc7222e0dc1b67ca98e95576c5
18,719
def nodestrength(mtx, mean=False):
    """
    Compute the node strength of a graph.

    Parameters
    ----------
    mtx : numpy.ndarray
        A matrix depicting a graph.
    mean : bool, optional
        If True, return the average node strength along the last axis of mtx.

    Returns
    -------
    numpy.ndarray
        The node strength.
    """
    strength = abs(mtx).sum(axis=0)
    return strength.mean(axis=-1) if mean else strength
0b8a30a6b1ab2218368c0af0f6bf8036b819d431
18,723
def file_urls_mutation(dataset_id, snapshot_tag, file_urls):
    """Return the OpenNeuro mutation to update the file urls of a snapshot
    filetree.
    """
    return {
        'query': 'mutation ($files: FileUrls!) { updateSnapshotFileUrls(fileUrls: $files)}',
        'variables': {
            'files': {
                'datasetId': dataset_id,
                'tag': snapshot_tag,
                'files': file_urls,
            },
        },
    }
ffa1ca42f5af7b93cfc6befb517b7909140b5b01
18,726
def get_marginal_topic_distrib(doc_topic_distrib, doc_lengths):
    """Return the marginal topic distribution p(T) (topic proportions) given
    the document-topic distribution (theta) `doc_topic_distrib` and the
    document lengths `doc_lengths`. The latter can be calculated with
    `get_doc_lengths()`.
    """
    # Weight each document's topic distribution by its length, then normalize.
    unnormalized = doc_topic_distrib.T @ doc_lengths
    return unnormalized / unnormalized.sum()
ebcb87a5ddb5e5e2e3c2446134c6ef2ab8a945fa
18,729
from typing import OrderedDict


def replicate(d: OrderedDict):
    """Expand a dict of (element, count) pairs into a list with each element
    repeated count many times.
    """
    return [element for element, count in d.items() for _ in range(count)]
276d7ea922c645d689a3ec70247427d031e0fa34
18,731
def get_count_limited_class(classes, class_name, min=1, max=1):
    """Find a class in an iterator over classes, and constrain its count.

    Args:
        classes (:obj:`iterator`): an iterator over some classes
        class_name (:obj:`str`): the desired class' name
        min (:obj:`int`): the fewest instances of a class named `class_name` allowed
        max (:obj:`int`): the most instances of a class named `class_name` allowed

    Returns:
        :obj:`type`: the class in `classes` whose `__name__` is `class_name`;
        `None` when zero instances are allowed and none are found.

    Raises:
        :obj:`ValueError`: if `min` > `max`, if the number of matches is
        outside [`min`, `max`], or if multiple distinct classes share the name.
    """
    if min > max:
        raise ValueError("min ({}) > max ({})".format(min, max))
    matching_classes = [cls for cls in classes if cls.__name__ == class_name]
    if not (min <= len(matching_classes) <= max):
        raise ValueError("the number of members of 'classes' named '{}' must be in [{}, {}], but it is {}".format(
            class_name, min, max, len(matching_classes)))
    # All matches must be the same class object, not merely same-named ones.
    unique_matching_classes = set(matching_classes)
    if len(unique_matching_classes) > 1:
        raise ValueError("'classes' should contain at most 1 class named '{}', but it contains {}".format(
            class_name, len(unique_matching_classes)))
    return matching_classes[0] if matching_classes else None
4512bb22aa2ac1632813bea8f834785b9b7c5c20
18,735
import requests


def post_splunk(url, token, payload):
    """
    Send a post request to Splunk API.

    :param url: full Splunk endpoint URL
    :param token: Splunk auth token, sent as ``Authorization: Splunk <token>``
    :param payload: request body, passed as form data
    :return: decoded JSON response
    :raises requests.HTTPError: when the response status is not 2xx
    """
    headers = {'Authorization': 'Splunk {}'.format(token)}
    # NOTE(review): verify=False disables TLS certificate validation —
    # confirm this is intentional for the target environment.
    res = requests.post(url=url, headers=headers, data=payload, verify=False)
    res.raise_for_status()
    return res.json()
45310cabaf65004217cd2bcd0196314cb4fbabd7
18,737
import math


def nu_e(n_n, n_e, T_e):
    """Approximate calculation of the electron collision frequency
    (from Kelly 89).

    Parameters
    ----------
    n_n : (float) neutral density cm-3
    n_e : (float) electron density cm-3
    T_e : (float) electron temperature K
    """
    # Electron-neutral and electron-ion contributions, summed.
    electron_neutral = 5.4 * 10**(-10) * n_n * T_e**(1/2)
    electron_ion = (34 + 4.18 * math.log(T_e**3 / n_e)) * n_e * T_e**(-3/2)
    return electron_neutral + electron_ion
3c73538dd97a4f03d0a98d8fe4427697089234e8
18,740
import math


def line(p0, p1):
    """
    Create the list of integer points on the segment between two points,
    based on the Bresenham algorithm.

    :param p0: (x, y) start point
    :param p1: (x, y) end point
    :return: list of (x, y) tuples tracing the line
    """
    steep = False
    x0 = p0[0]
    y0 = p0[1]
    x1 = p1[0]
    y1 = p1[1]
    # Steeper than 45°: transpose coordinates so we always iterate along x.
    if math.fabs(x0 - x1) < math.fabs(y0 - y1):
        x0, y0 = y0, x0
        x1, y1 = y1, x1
        steep = True
    # Always draw left to right.
    if x0 > x1:
        x0, x1 = x1, x0
        y0, y1 = y1, y0
    dx = x1 - x0
    dy = y1 - y0
    # Degenerate single-column case after the transpose; 0.1 keeps the error
    # accumulation below the 0.5 step threshold so y never moves.
    if dx == 0:
        derror = 0.1
    else:
        derror = math.fabs(dy / dx)
    error = 0.0
    y = y0
    x = x0
    points = []
    while x <= x1:
        # Undo the transpose when emitting points.
        points.append((y, x) if steep else (x, y))
        error += derror
        # Step y once the accumulated error crosses half a pixel.
        if error > 0.5:
            y += 1 if y1 > y0 else -1
            error -= 1.
        x += 1
    return points
3f590e2416280dc67467b6d08bc39b50e84e0717
18,741
def ltd(balance_df):
    """Check if the current LTD (Long Term Debt) was reduced since the
    previous year.

    Explanation of LTD: https://www.investopedia.com/terms/l/longtermdebt.asp

    balance_df = Balance Sheet of the specified company
    (column 0 = current year, column 1 = previous year)
    """
    row = balance_df.index.get_loc("Long Term Debt")
    current = balance_df.iloc[row, 0]
    previous = balance_df.iloc[row, 1]
    return bool(current < previous)
3a43098da821e96d605f3bf579e0ca8e11d1a66b
18,752
def quick_sort(arr):
    """Sort array of numbers with quicksort.

    Returns a new sorted list; `arr` is partially mutated in the process.
    """
    if len(arr) == 1:
        return arr
    if len(arr) > 1:
        # First element is the pivot; scan inward from both ends, swapping
        # misplaced pairs (Hoare-style partition).
        pivot = arr[0]
        left = 1
        right = len(arr) - 1
        while left <= right:
            if arr[left] > pivot and arr[right] < pivot:
                arr[left], arr[right] = arr[right], arr[left]
                left += 1
                right -= 1
            elif arr[left] <= pivot and arr[right] < pivot:
                left += 1
            elif arr[left] > pivot and arr[right] >= pivot:
                right -= 1
            elif arr[left] <= pivot and arr[right] >= pivot:
                left += 1
                right -= 1
        # Move the pivot into its final position at index `right`.
        arr[0], arr[right] = arr[right], arr[0]
        divider = right + 1
        # Recurse on the partitions on each side of the pivot.
        first = quick_sort(arr[:right])
        second = quick_sort(arr[divider:])
        return first + [arr[right]] + second
    else:
        # Empty input falls through to here.
        return arr
9f40258588967247379d50532dd62ec0588365b1
18,753
def _include_exclude_list(include, exclude): """ create the list of queries that would be checked for include or exclude """ keys = [] if include: for item in include: keys.append((item, 'included')) if exclude: for item in exclude: keys.append((item, 'excluded')) return keys
ed968d56d6bb095de118f6526e277a54e1610cc4
18,754
def fetch_specie(specie_url, fetcher):
    """Retrieve the data of a single specie.

    :param specie_url: URL identifying the specie
    :param fetcher: callable that takes a URL and returns its data
    :return: whatever `fetcher` returns for `specie_url`
    """
    return fetcher(specie_url)
5882affad01a750d2afb5ca047a9f82872d03c56
18,755
import pickle


def load_pik(pikFile):
    """Load and return the object stored in the pickle file `pikFile`."""
    with open(pikFile, 'rb') as handle:
        return pickle.load(handle)
f70f08bbf98fddad7bc1d99dee2049b8525c13e5
18,759
def classImplements(c, ms):
    """Check that class `c` implements every method named in `ms`.

    Returns True when every name in `ms` resolves to a callable attribute
    of `c`; prints a complaint for each missing method and returns False
    otherwise.
    """
    result = True
    for method_name in ms:
        method = getattr(c, method_name, False)
        if not method or not callable(method):
            print(c, "does not have method", method_name)
            result = False
    return result
e459e9a50cce501356d5494fb616cfa4df32fff7
18,760
def datetime_to_grass_datetime_string(dt): """Convert a python datetime object into a GRASS datetime string .. code-block:: python >>> import grass.temporal as tgis >>> import dateutil.parser as parser >>> dt = parser.parse("2011-01-01 10:00:00 +01:30") >>> tgis.datetime_to_grass_datetime_string(dt) '01 jan 2011 10:00:00 +0090' >>> dt = parser.parse("2011-01-01 10:00:00 +02:30") >>> tgis.datetime_to_grass_datetime_string(dt) '01 jan 2011 10:00:00 +0150' >>> dt = parser.parse("2011-01-01 10:00:00 +12:00") >>> tgis.datetime_to_grass_datetime_string(dt) '01 jan 2011 10:00:00 +0720' >>> dt = parser.parse("2011-01-01 10:00:00 -01:30") >>> tgis.datetime_to_grass_datetime_string(dt) '01 jan 2011 10:00:00 -0090' """ # GRASS datetime month names month_names = ["", "jan", "feb", "mar", "apr", "may", "jun", "jul", "aug", "sep", "oct", "nov", "dec"] if dt is None: raise Exception("Empty datetime object in datetime_to_grass_datetime_string") # Check for time zone info in the datetime object if dt.tzinfo is not None: tz = dt.tzinfo.utcoffset(0) if tz.seconds > 86400 / 2: tz = (tz.seconds - 86400) / 60 else: tz = tz.seconds/60 string = "%.2i %s %.2i %.2i:%.2i:%.2i %+.4i" % (dt.day, month_names[dt.month], dt.year, dt.hour, dt.minute, dt.second, tz) else: string = "%.2i %s %.4i %.2i:%.2i:%.2i" % (dt.day, month_names[ dt.month], dt.year, dt.hour, dt.minute, dt.second) return string
47495bb7a26fda2dfc40b1cbdbff045030fecc31
18,761
def flip_index(i, n):
    """Reorder qubit indices from largest to smallest.

    >>> from sympy.physics.quantum.qasm import flip_index
    >>> flip_index(0, 2)
    1
    >>> flip_index(1, 2)
    0
    """
    return (n - 1) - i
c6b8f7143bda5cdf80c7994041536b25d142c3cc
18,763
def _splitquery(url): """splitquery('/path?query') --> '/path', 'query'.""" path, delim, query = url.rpartition('?') if delim: return path, query return url, None
ea1d04538d90139bc257a439a0d6bf02e6b15b13
18,768
from typing import List


def position_util(cmd_line: List[str], word_position: int, word_before_cursor: str) -> bool:
    """Util method for autocompletion conditions. Makes autocomplete work well.

    :param cmd_line: the list of command line words
    :param word_position: the position of the word we are attempting to autocomplete
    :param word_before_cursor: word_before_cursor parsed from the document
    :return: True if we should try to autocomplete this word.
    """
    # Special case: nothing typed yet — cmd_line arrives as [''].
    if word_position == 1 and cmd_line == ['']:
        return True
    n = len(cmd_line)
    # Don't keep completing after the word position.
    if n >= word_position + 1:
        return False
    # Don't complete if we just hit space after the word position.
    if n == word_position and word_before_cursor == '':
        return False
    # Don't complete on the previous word position until there is a space.
    if n == word_position - 1 and word_before_cursor != '':
        return False
    return True
463bb420d7a7cc1aa336b8d664194cc99b5c4dcb
18,774
def get_fmriprep_outlier_volumes_from_confounds(confounds_df):
    """Extract which volume numbers are outliers from the fmriprep confounds df.

    :param confounds_df: fmriprep confounds DataFrame; outlier indicator
        columns are those whose name matches the regex 'motion'
    :return: bad_volumes: list, eg [34, 35, 100, 150] — the row index labels
        where any motion column is > 0
    """
    # Columns whose names contain 'motion' hold the outlier indicators.
    motion = confounds_df.filter(regex='motion')
    # Bug fix: positional `.any(1)` is deprecated and removed in pandas 2.x;
    # pass the axis by keyword.
    flagged = motion[(motion > 0).any(axis=1)]
    # Row index labels of the flagged volumes.
    return list(flagged.index)
b62d833ec2b7f000584354ca6470863acb33682c
18,777
def _priority_connection(priority_route:dict, cost_matrix, mean_routes, std_routes, mean_weight, std_weigth, distance_weight) -> dict:
    """
    Give us the priority connections dictionary for each route

    Parameters
    -----------
    priority_route: dict
        The dictionary with the priority routes
    cost_matrix: dict of dicts
        Dict of dicts keyed by points to visit; the inner value is the cost
        of going from the first point to the second (a matrix as dicts).
    mean_routes: dict
        A dict with the mean distance/cost of all points one each other
    std_routes: dict
        A dict with the standard deviation distance/cost of all points one each other
    mean_weight: float
        Ponderation of the mean: higher values make points with higher mean
        (more distance from others) prioritize their connections.
    std_weigth: float
        Ponderation of the standard deviation: higher values make points with
        higher deviation prioritize their connections.
        NOTE(review): parameter name keeps the original 'weigth' spelling to
        preserve the keyword interface.
    distance_weight: float
        Ponderation of the distance: higher values make distant point pairs
        score larger (the denominator grows).

    Return
    -------
    A dict of dicts: every point's connections ordered by descending priority
    """
    dict_prior_connections_ordered = {}
    base_order = priority_route.keys()
    for id_route in base_order:
        prior_connections = {}
        for given_route in base_order:
            if id_route != given_route:
                # Weighted mean and std raise the score; distance lowers it.
                prior_connections[given_route] = ((mean_routes[given_route]**mean_weight) * (std_routes[given_route]**std_weigth)) / cost_matrix[given_route][id_route] ** distance_weight
        # Sort this route's candidates by descending priority value.
        dict_prior_connections_ordered[id_route] = dict(sorted(prior_connections.items(), reverse=True, key=lambda x: x[1]))
    return dict_prior_connections_ordered
b4be98cd4fe07b3592e4fa44a63d38195a7dfd05
18,779
import jinja2


def fillHTMLTemplate(templateString, params):
    """Invokes the jinja2 methods to fill in the slots in the template.

    :param templateString: template source text containing jinja2 placeholders
    :param params: dict of values substituted into the template
    :return: the rendered HTML string
    """
    templateObject = jinja2.Template(templateString)
    htmlContent = templateObject.render(params)
    return htmlContent
bba20da7a5411bf8b252fffcdb108516a2d5afa9
18,790
def check_above_or_below(considering_pt, remaining_pt_1, remaining_pt_2):
    """Check whether `considering_pt` is above or below the line connecting
    the two remaining points.

    1: above
    -1: below
    """
    direction = remaining_pt_2 - remaining_pt_1
    # 2-D cross product of the segment direction with the vector from
    # remaining_pt_1 to the considered point.
    cross = (-direction[1] * (considering_pt[0] - remaining_pt_1[0])
             + direction[0] * (considering_pt[1] - remaining_pt_1[1]))
    return 1 if cross > 0 else -1
0da67abc45356260580beec22f37a56bd5f43398
18,792
from typing import Dict from typing import Any def _count_populations_in_params(params: Dict[str, Any], prefix: str) -> int: """ Counts the number of electron or ion populations in a ``params`` `dict`. The number of populations is determined by counting the number of items in the ``params`` `dict` with a key that starts with the string defined by ``prefix``. """ return len([key for key in params if key.startswith(prefix)])
0678e198f6e5562eb26e8d2c68aad14422d0f820
18,801
def gps_to_utc(gpssec):
    """
    Convert GPS seconds to UTC seconds.

    Parameters
    ----------
    gpssec: int
        Time in GPS seconds.

    Returns
    -------
    Time in UTC seconds.

    Notes
    -----
    The code is ported from Offline.
    GPS time starts at Jan 6th, 1980 and is not adjusted by leap seconds,
    so the conversion subtracts the leap seconds accumulated since then.

    Examples
    --------
    >>> gps_to_utc(0) # Jan 6th, 1980
    315964800
    """
    kSecPerDay = 24 * 3600
    # Offset between the UTC epoch (1970) and the GPS epoch (1980-01-06):
    # 10 years incl. 7 leap-ish extra days.
    kUTCGPSOffset0 = (10 * 365 + 7) * kSecPerDay
    # (gps_second_of_introduction, cumulative_leap_seconds) pairs.
    kLeapSecondList = (
        ((361 + 0 * 365 + 0 + 181) * kSecPerDay + 0, 1),  # 1 JUL 1981
        ((361 + 1 * 365 + 0 + 181) * kSecPerDay + 1, 2),  # 1 JUL 1982
        ((361 + 2 * 365 + 0 + 181) * kSecPerDay + 2, 3),  # 1 JUL 1983
        ((361 + 4 * 365 + 1 + 181) * kSecPerDay + 3, 4),  # 1 JUL 1985
        ((361 + 7 * 365 + 1) * kSecPerDay + 4, 5),  # 1 JAN 1988
        ((361 + 9 * 365 + 2) * kSecPerDay + 5, 6),  # 1 JAN 1990
        ((361 + 10 * 365 + 2) * kSecPerDay + 6, 7),  # 1 JAN 1991
        ((361 + 11 * 365 + 3 + 181) * kSecPerDay + 7, 8),  # 1 JUL 1992
        ((361 + 12 * 365 + 3 + 181) * kSecPerDay + 8, 9),  # 1 JUL 1993
        ((361 + 13 * 365 + 3 + 181) * kSecPerDay + 9, 10),  # 1 JUL 1994
        ((361 + 15 * 365 + 3) * kSecPerDay + 10, 11),  # 1 JAN 1996
        ((361 + 16 * 365 + 4 + 181) * kSecPerDay + 11, 12),  # 1 JUL 1997
        ((361 + 18 * 365 + 4) * kSecPerDay + 12, 13),  # 1 JAN 1999
        # DV: 2000 IS a leap year since it is divisible by 400,
        # ie leap years here are 2000 and 2004 -> leap days = 6
        ((361 + 25 * 365 + 6) * kSecPerDay + 13, 14),  # 1 JAN 2006
        ((361 + 28 * 365 + 7) * kSecPerDay + 14, 15),  # 1 JAN 2009
        ((361 + 31 * 365 + 8 + 181) * kSecPerDay + 15, 16),  # 1 JUL 2012
        ((361 + 34 * 365 + 8 + 181) * kSecPerDay + 16, 17),  # 1 JUL 2015
        ((361 + 36 * 365 + 9) * kSecPerDay + 17, 18)  # 1 JAN 2017
    )
    # Walk the table newest-first and take the first entry at or before
    # gpssec — that is the cumulative leap-second count to subtract.
    leapSeconds = 0
    for x in reversed(kLeapSecondList):
        if gpssec >= x[0]:
            leapSeconds = x[1]
            break
    return gpssec + kUTCGPSOffset0 - leapSeconds
342c479df8d4c864592494e8d50452e9f02c04d5
18,804
def get_assoc_scheme_parameter(assoc_scheme):
    """Map an association scheme name to its Thermopack parameter string.

    assoc_scheme - 3B, 3C, etc. ("NONE", case-insensitive, means no
    association).

    Output:
        param - Thermopack parameter
    """
    if assoc_scheme.upper() == "NONE":
        return "no_assoc"
    return "assoc_scheme_{}".format(assoc_scheme)
b0f7a09d157dcaa466cec2b2a47684a1ee2df80b
18,806
import requests
from bs4 import BeautifulSoup


def get_page_soup(url):
    """
    Returns parsed html of a given url

    Parameters:
        url (string): complete url

    Returns:
        html: pageSoup (BeautifulSoup over the response body)
    """
    # Desktop Chrome User-Agent so naive bot filtering doesn't block us.
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.110 Safari/537.36'}
    pageTree = requests.get(url, headers=headers)
    pageSoup = BeautifulSoup(pageTree.content, 'html.parser')
    return pageSoup
efbdbb92e0b93bf6390541a9608855da266d7682
18,808
import torch


def clip_boxes_to_image(boxes, size):
    """copy from torchvision.ops.boxes

    Clip boxes so that they lie inside an image of size `size`.

    Arguments:
        boxes (Tensor[N, 4]): (left, top, right, bottom)
        size (Tuple(H, W)): size of the image

    Returns:
        clipped_boxes (Tensor[N, 4])
    """
    height, width = size
    # Even-indexed coordinates are x, odd-indexed are y.
    xs = boxes[..., 0::2].clamp(min=0, max=width - 1)
    ys = boxes[..., 1::2].clamp(min=0, max=height - 1)
    # Stack along a new trailing dim and flatten back to the input layout.
    clipped = torch.stack((xs, ys), dim=boxes.dim())
    return clipped.reshape(boxes.shape)
139e865f8137f1529141546a9d71877599adacd1
18,811
def extend_schema_field(field):
    """
    Decorator for the "field" kind. Can be used with ``SerializerMethodField``
    (annotate the actual method) or with custom ``serializers.Field``
    implementations.

    If your custom serializer field base class is already the desired type,
    decoration is not necessary. To override the discovered base class type,
    you can decorate your custom field class.

    Always takes precedence over other mechanisms (e.g. type hints,
    auto-discovery).

    :param field: accepts a ``Serializer`` or :class:`~.types.OpenApiTypes`
    """
    def annotate(target):
        # Lazily create the annotation store on first decoration.
        if not hasattr(target, '_spectacular_annotation'):
            target._spectacular_annotation = {}
        target._spectacular_annotation['field'] = field
        return target
    return annotate
91f665f927357813f43963ce9c348f8da7a2c5c8
18,812
def node_region(patched_ast_node):
    """Return the `region` attribute of a patched AST node."""
    return patched_ast_node.region
870fc43f1b635d5b5eb5e5a4ffa80ab322af168d
18,818
def _extract_target(data, target_col): """Removes the target column from a data frame, returns the target col and a new data frame minus the target.""" target = data[target_col] train_df = data.copy() del train_df[target_col] return target, train_df
726a76e254c1d6dc120b590d9d7128986ddc9227
18,823
def score_compressed_state(total, trumps):
    """Calculate the score from a compressed state.

    Each trump is worth 10; trumps are dropped one at a time until the
    score no longer busts (<= 31). If every option busts, the last value
    computed (just `total`) is returned.
    """
    score = 0
    for used_trumps in range(trumps, -1, -1):
        score = total + 10 * used_trumps
        if score <= 31:
            break
    return score
0d6652174c2f3f409431c15f38874b8057b94921
18,824
from typing import List
from typing import Dict
import collections


def merge_series(list_of_metas: List[Dict]) -> Dict:
    """Merge series with the same SeriesUID.

    Groups the metadata dicts by their "SeriesInstanceUID" key.
    """
    merged = collections.defaultdict(list)
    for meta in list_of_metas:
        merged[meta["SeriesInstanceUID"]].append(meta)
    return merged
42b23b504c6e973913f93da2e86f71f8ea9baec2
18,833
def _rlimit_min(one_val, nother_val): """Returns the more stringent rlimit value. -1 means no limit.""" if one_val < 0 or nother_val < 0 : return max(one_val, nother_val) else: return min(one_val, nother_val)
ba286aa6b36a53d691a13b775bbbcfd8722c8b8e
18,834
def _get_fields(line, delimiter=' ', remove_fields=['', '\n']): """Gets the fields from a line delimited by delimiter and without entries in remove_fields Parameters ---------- line : str Line to find the fields delimiter : str Text separating fields in string remove_fields : list of str Fields to delete Returns ------- fields : list of str Fields of the line """ for remove_field in remove_fields: line = line.replace(remove_field, '') all_fields = line.split(delimiter) fields = [] for field in all_fields: if all([field != remove_field for remove_field in remove_fields]): # Add field if it does not match any of the remove_fields fields.append(field) return fields
adfa2e6a1be18049b7b956bef7a0f3c121120641
18,839
def dict_alert_msg(form_is_valid, alert_title, alert_msg, alert_type):
    """Build the payload for an internal alert message to the user.

    :param form_is_valid: True/False (all lowercase once JSON-encoded)
    :param alert_title: str
    :param alert_msg: str
    :param alert_type: one of 'success', 'error', 'warning', 'info'
    :return: dict with the four alert fields
    """
    return {
        'form_is_valid': form_is_valid,
        'alert_title': alert_title,
        'alert_msg': alert_msg,
        'alert_type': alert_type,
    }
f9f82c9b809be2ad1d6d872d6e00610a5ebc3493
18,840
def check_no_overlap(op_start_times:list, machines_ops_map:dict, processing_time:dict):
    """
    Check if the solution violates the no overlap constraint.
    Returns True if the constraint is violated.

    Keyword arguments:
    op_start_times (list): Start times for the operations
    machines_ops_map(dict): Mapping of machines to operations
    processing_time (dict): Operation processing times
    """
    # NOTE(review): assumes processing_time's insertion order matches the
    # operation indices used in machines_ops_map/op_start_times — confirm
    # against the caller.
    pvals = list(processing_time.values())
    # For each machine
    for ops in machines_ops_map.values():
        machine_start_times = [op_start_times[i] for i in ops]
        machine_pvals = [pvals[i] for i in ops]
        # Two operations start at the same time on the same machine
        if len(machine_start_times) != len(set(machine_start_times)):
            return True
        # There is overlap in the runtimes of two operations assigned to the
        # same machine: sort by start time and compare adjacent intervals.
        machine_start_times, machine_pvals = zip(*sorted(zip(machine_start_times, machine_pvals)))
        for i in range(len(machine_pvals) - 1):
            if machine_start_times[i] + machine_pvals[i] > machine_start_times[i+1]:
                return True
    return False
90736c066987466adcf641f9999542466c17fc8e
18,842
import requests


def shock_download(url, token):
    """
    Download data from a Shock node.

    Parameters
    ----------
    url : str
        URL to Shock node
    token : str
        Authentication token for Patric web services

    Returns
    -------
    str
        Data from Shock node

    Raises
    ------
    requests.HTTPError
        When the response status is not 200 OK.
    """
    # NOTE(review): verify=False disables TLS certificate validation —
    # confirm this is intentional.
    response = requests.get(url + '?download', headers={'Authorization': 'OAuth ' + token}, verify=False)
    if response.status_code != requests.codes.OK:
        response.raise_for_status()
    return response.text
7a2e9855ca807892cef16af370755eac3644f9f5
18,851
def create_html_email_href(email: str) -> str:
    """
    HTML version of an email address

    :param email: the email address
    :return: a ``mailto:`` anchor tag for use in an HTML document, or ""
        when `email` is falsy
    """
    if not email:
        return ""
    return f'<a href="mailto:{email}">{email}</a>'
c72bb76f3b1fc3d30571a879d1b8172d8c4e77cd
18,858
def dummyListener(source, intent):
    """Sample intent listener.

    Intent listeners (see :any:`registerIntentListener`) follow this
    prototype. A listener may handle `intent` and perform an appropriate
    action.

    Returning a truthy value marks the intent as handled, so no further
    callbacks run for it. If the listener returned truthy without calling
    :any:`Intent.accept`, the intent is accepted automatically and the
    returned value becomes the intent result (:any:`Intent.result`).

    Returning False leaves the intent for other callbacks to handle.

    :param source: object which sent the intent
    :type source: QObject
    :param intent:
    :type intent: IntentEvent
    :returns: True if the listener handled the intent, else False
    :rtype: bool
    """
    # This dummy never handles anything.
    return False
c994b08f5d409debe80fb9f8a29380355a190e12
18,860
import sqlite3


def isSqlite3DB(filepath):
    """Return whether the file at *filepath* is a sqlite3 database.

    Args:
        filepath (str): The file to check.

    Returns:
        bool: Whether the database could be opened and queried.
    """
    try:
        conn = sqlite3.connect(filepath)
    except Exception:
        # Could not even open a connection.
        return False
    # BUGFIX: the original leaked the connection when execute() raised
    # (the early `return False` skipped conn.close()); close it always.
    try:
        conn.execute("pragma schema_version;")
    except Exception:
        return False
    finally:
        conn.close()
    return True
54a0c42faea75ffa9d2571b78976db9116c81207
18,862
def get_sea_attribute_cmd(seaname):
    """Build the VIOS command string querying an SEA adapter's attributes.

    The command fetches pvid, pvid_adapter and virt_adapters from the
    configured SEA device, plus the SEA's state.

    :param seaname: sea device name
    :returns: A VIOS command to get the sea adapter's attributes.
    """
    template = (
        "ioscli lsdev -dev %(sea)s -attr pvid,pvid_adapter,virt_adapters;"
        "ioscli lsdev -type sea | grep %(sea)s"
    )
    return template % {'sea': seaname}
18224e14e45b73ff716f4282aaff3e06c4584866
18,867
def zzx_neg(f):
    """Negate a polynomial in Z[x], coefficient by coefficient."""
    return list(map(lambda coeff: -coeff, f))
c813597dc9540c8d85221352da10db894de4aa4c
18,877
def get_q_values(node):
    """Collect all (state, action, q) triples from a tree.

    Parameters
    ----------
    node : Node
        Root of the (sub)tree to traverse.

    Returns
    -------
    list
        [state_val, action_val, qVal] triples gathered by a depth-first
        traversal of the tree rooted at *node*.
    """
    # Leaves are marked by children being None (not an empty list);
    # compare with `is None` rather than `== None`.
    if node.children is None:
        return [[node.state_val, node.action_val, node.qVal]]
    q_values = []
    for child in node.children:
        q_values.extend(get_q_values(child))
    return q_values
8c3015f3b9c44ec7bfaaaf8833b39d167598f2bc
18,890
def change_linewidth(ax, lw=3):
    """Apply one line width to every line plotted on the given axis.

    Parameters
    ----------
    ax : mpl.axis
        axis whose line plots are updated
    lw : float, optional
        new line width, by default 3

    Returns
    -------
    ax : mpl.axis
        the same axis, for chaining

    Examples
    --------
    fig, ax = plt.subplots(1, 1)
    x = np.arange(10)
    y = np.arange(10)
    ax.plot(x, y, x + 1, y, x - 1, y)
    change_linewidth(ax, 3)
    """
    for line in ax.lines:
        line.set_linewidth(lw)
    return ax
b92294878351b6f251c99f43295a8e8e56995cd1
18,893
def find_upper_bound(x):
    """Return an integer n with n >= len(x), without calling len().

    Probes indices 1, 2, 4, 8, ... until one is out of range, so only
    O(log(len(x))) probes are made.
    """
    bound = 1
    while True:
        try:
            x[bound]
        except IndexError:
            return bound
        bound *= 2
4251791d0e270f29bc9f4d26bbf81c2292ffa50c
18,896
def yes_or_no(question):
    """Ask a yes-or-no question and capture input.

    Blank input is interpreted as yes; otherwise the answer must start
    with y/Y or n/N, and anything else re-prompts recursively.

    :param question: prompt shown to the user
    :return: True for yes, False for no
    """
    # BUGFIX: strip BEFORE capitalize -- the original capitalized first,
    # so input with leading whitespace (e.g. " yes") kept its first letter
    # lowercase and was never recognized.
    reply = str(input(question + ' (Y/n): ')).strip().capitalize()
    if reply == "":
        return True
    if reply[0] == 'Y':
        return True
    if reply[0] == 'N':
        return False
    return yes_or_no("Please enter Y or N.")
f441e39b85dc7407bedce5c120aa9839f0a3064f
18,897
def get_material_nodes_by_type(material, bl_idname):
    """Find material nodes with the given ``bl_idname`` type name.

    :param material: material whose node tree is searched
    :param bl_idname: node type name to match
    :return: generator over the matching nodes
    """
    all_nodes = material.node_tree.nodes
    return (candidate for candidate in all_nodes if candidate.bl_idname == bl_idname)
4406bf09a0d44ecb29e917dc7ff7d78b179cfbf8
18,898
def matching(tr_synth, tr_seis):
    """Zero all values of the seismic trace outside the limits of the
    synthetic trace.

    Parameters
    ----------
    tr_synth : numpy.array
        The synthetic trace
    tr_seis : numpy.array
        The seismic trace (assumed to be the same length as tr_synth)

    Returns
    -------
    tr_seis_new : numpy.array
        New seismic trace with zeros outside the area of comparison
    """
    tr_seis_new = tr_seis.copy()
    n = len(tr_seis)
    # BUGFIX: check the bound BEFORE indexing. The original evaluated
    # `tr_synth[i] == 0 and i < n`, which raises IndexError when the
    # synthetic trace is all zeros.
    i = 0
    while i < n and tr_synth[i] == 0:
        tr_seis_new[i] = 0
        i += 1
    # Same fix for the trailing scan.
    i = n - 1
    while i >= 0 and tr_synth[i] == 0:
        tr_seis_new[i] = 0
        i -= 1
    return tr_seis_new
55527b1302fd099e88353ea7c1ee447633a5a494
18,904
def booleanize_if_possible(sample):
    """Boolean-ize truthy/falsey strings; pass others through unchanged.

    Truthy: 'true', 'yes', 'on', '1' (case-insensitive).
    Falsey: 'false', 'no', 'off', '0'.

    :param sample: the string to interpret
    :return: True/False when recognized, otherwise *sample* unchanged
    """
    # BUGFIX: the original lists contained the integers 1 and 0, which can
    # never equal sample.lower() (a string); use the string forms instead.
    lowered = sample.lower()
    if lowered in ('true', 'yes', 'on', '1'):
        return True
    if lowered in ('false', 'no', 'off', '0'):
        return False
    return sample
10ffb3481f15a7548512f01266027977a61a7e13
18,912
def normalize_feature_for_target_col(in_df, str_norm_target_col, abs_max_num):
    """Clip and normalize one column of a data frame into [-1, 1].

    Values are clipped to [-abs_max_num, abs_max_num] and then divided by
    abs_max_num, e.g. with abs_max_num=60:
    [20, 30, 70, 65, -90] -> [0.333..., 0.5, 1.0, 1.0, -1.0]

    NOTE: the input data frame is modified in place and also returned.

    :param in_df: pandas.DataFrame
    :param str_norm_target_col: string, name of the column to normalize
    :param abs_max_num: float, positive clipping/normalization bound
    :return out_df: pandas.DataFrame
    """
    assert abs_max_num > 0, 'Please set positive number in "abs_max_num".'
    # Masks are computed from the pre-clip values; the positive clip cannot
    # push a value below -abs_max_num, so this matches sequential clipping.
    column = in_df.loc[:, str_norm_target_col]
    in_df.loc[column >= abs_max_num, str_norm_target_col] = abs_max_num
    in_df.loc[column <= -abs_max_num, str_norm_target_col] = -abs_max_num
    # Scale the clipped column into [-1, 1].
    in_df.loc[:, str_norm_target_col] = in_df.loc[:, str_norm_target_col] / abs_max_num
    return in_df
63eaa8065d6485d6bdacb850d318ed993225204f
18,913
import collections
import operator


def dnsbl_hit_count(log_data):
    """Count how many hosts were found in each dnsbl.

    :param log_data: mapping of host -> iterable of blacklists it hit
    :return: list of (blacklist, count) pairs, most-hit first
    """
    counts = collections.defaultdict(int)
    for blacklists in log_data.values():
        for blacklist in blacklists:
            counts[blacklist] += 1
    return sorted(counts.items(), key=operator.itemgetter(1), reverse=True)
6a03054b7f50b1cbb27e58e4edc1212bb7cc2abf
18,915
def resize_lane(lane, x_ratio, y_ratio):
    """Rescale the coordinates of a lane according to image resize ratios.

    :param lane: the lane to resize (list of dicts with 'x'/'y' keys)
    :type lane: a list of dicts
    :param x_ratio: image resize ratio along the x axis
    :type x_ratio: float
    :param y_ratio: image resize ratio along the y axis
    :type y_ratio: float
    :return: resized lane
    :rtype: list
    """
    resized = []
    for point in lane:
        resized.append({
            "x": float(point['x']) / x_ratio,
            "y": float(point['y']) / y_ratio,
        })
    return resized
23dacea6ac9820b73fad124433630a991ea14d37
18,916
def _attrfilter(label, value, expr):
    """Build an ``attrfilter(<label>, <value>, <expr>)`` query expression.

    `expr` is a query expression of any supported type.
    """
    return f"attrfilter({label}, {value}, {expr})"
38dfc19bf043a9c327665c324f96d7f205ea6416
18,922
def genererate_principal(primary: str, instance: str, realm: str) -> str:
    """Assemble a Kerberos principal from its three components.

    The instance is optional (an empty string omits the '/instance' part);
    the realm is always upper-cased.
    """
    principal = f"{primary}/{instance}" if instance else primary
    return f"{principal}@{realm.upper()}"
a39055e2029f044ce50107cb58860465491a5333
18,926
import re


def check_password_complexity(email, password):
    """Check a password against the minimum complexity requirements.

    The rules are:
    - minimum length 10
    - at least one lowercase letter
    - at least one uppercase letter
    - at least one numeric character
    - at least one symbol character
    - at most 3 consecutive characters taken from the email address

    :param email: the user's email address
    :param password: the input plain-text password
    :return: True when all requirements are satisfied, False otherwise
    """
    if len(password) < 10:
        return False
    symbol_class = r'''[`~!@#$%^&*()\-=_+\[\]{}\\|;:'",.<>/?]'''
    required = (r'[a-z]', r'[A-Z]', r'[0-9]', symbol_class)
    if any(re.search(pattern, password) is None for pattern in required):
        return False
    # Reject any 4-character window of the email that appears in the password.
    return not any(email[i:i + 4] in password for i in range(len(email) - 3))
16bd2d99777a7d0764ce3b697b1c3319c60ccf87
18,928
def autoname(index, sizes):
    """Name the layer at position *index* in a stack of *sizes* layers.

    >>> autoname(0, sizes=4)
    'input'
    >>> autoname(1, sizes=4)
    'hidden1'
    >>> autoname(2, sizes=4)
    'hidden2'
    >>> autoname(3, sizes=4)
    'output'
    """
    if index == 0:
        return "input"
    if index == sizes - 1:
        return "output"
    # A 3-layer network has exactly one hidden layer, so no number is needed.
    if sizes == 3:
        return "hidden"
    return "hidden%d" % index
c7b426f7b3865472e64b84cab4ddff003cd47576
18,933
def _standardize(dataframe):
    """Center each feature at 0 and scale to unit standard deviation.

    The transformation is: scaled_value = (value - mean) / standard deviation

    Parameters
    ----------
    dataframe : pandas.DataFrame
        The data frame to be used for EDA.

    Returns
    -------
    res : pandas.core.frame.DataFrame
        Scaled dataset; the input frame is left unmodified.
    """
    scaled = dataframe.copy()
    for column in dataframe.columns:
        series = dataframe[column]
        scaled[column] = (series - series.mean()) / series.std()
    return scaled
8db61585170223056e176e8b444a33a785cec591
18,934
def issues_data(record):
    """Extract issue counts from a record.

    :param record: mapping with 'total_files' and 'files_with_issues' keys
    :return: (total, issues, correct) as ints, where correct = total - issues
    """
    total_files = int(record["total_files"])
    files_with_issues = int(record["files_with_issues"])
    return total_files, files_with_issues, total_files - files_with_issues
9ff63711f50ef7df1c274d93eec3bd5780d2337d
18,937
import inspect


def _wrapped_fn_argnames(fun):
    """Return the tuple of argument names of a (possibly wrapped) function."""
    signature = inspect.signature(fun)
    return tuple(signature.parameters)
d68d744051c45c5992e700f06d91121af8738486
18,940
def clean_sql_statement(original: str) -> str:
    """Clean up a SQL statement.

    Strips leading/trailing whitespace and guarantees a trailing semicolon.
    """
    statement = original.strip()
    return statement if statement.endswith(";") else statement + ";"
cf8bc73da26cd4cad363b8560170a37c6d5228de
18,946
def sort_keypoints(kps):
    """Return the cv2.KeyPoint list ordered by descending response.

    Ties keep their original relative order (stable sort).
    """
    return sorted(kps, key=lambda kp: kp.response, reverse=True)
7b1cc49498571715b2715118fb3887384d4e386c
18,948
def aumento_salarial(salario, porcentagem):
    """Apply a percentage raise to a salary.

    :param salario: current salary
    :param porcentagem: raise percentage (e.g. 10 for a 10% raise)
    :return: the new salary, rounded to 2 decimal places
    """
    aumento = salario * porcentagem / 100
    return round(salario + aumento, 2)
7578f69bf486ba58374b70be673c1453694b3a72
18,958
def ask(question: str, default: str = "") -> str:
    """A simple interface for asking questions to the user.

    Three options are given:

    * question and no default -> plain input
    * question and a default value -> with no user input the default is returned
    * question and 'yes'/'no' default -> shown as [Y/n] / [y/N]

    Arguments:
        question (str): the question for the user

    Keyword Arguments:
        default (str): a default value (default: {""})

    Returns:
        str -- the raw user answer, or the default when the answer is empty
    """
    if default == 'yes':
        appendix = " [Y/n] "
    elif default == 'no':
        appendix = " [y/N] "
    elif default:
        appendix = " [{}] ".format(default)
    else:
        appendix = " "
    try:
        answer = input(question + appendix)
    except EOFError:
        # FIX: the bound-but-unused `eof` variable is dropped, and we raise
        # SystemExit directly instead of calling exit(), which only exists
        # when the `site` module is loaded. The message is unchanged.
        raise SystemExit("Stdin was closed. Exiting...")
    return answer if answer else default
e0262592b32f893043b4bc48fd32e99e6b864732
18,961
def mock_check_version(*args, **kwargs):
    """Necessary mock so KafkaProducer does not connect on init.

    Accepts and ignores any arguments; always reports version (0, 10).
    """
    return (0, 10)
6b989ed3e6226e17c38569c266c03bb94b3db0d3
18,965
def check_password_vs_blacklist(current_string, blacklist):
    """Check whether a string is contained within a blacklist.

    The string is whitespace-stripped first, then tested with surrounding
    double quotes, single quotes, or backticks removed.

    Arguments:
        current_string {string} -- the string to check against the blacklist
        blacklist {list} -- words defined within a given blacklist file

    Returns:
        [bool] -- whether current_string was found within the blacklist
    """
    stripped = current_string.strip()
    return any(stripped.strip(quote) in blacklist for quote in ('"', "'", '`'))
e11223cc0c8ffcb0d03e9df2dbfa9a71a87e2667
18,967
def plusOneSum(arr):
    """Return the sum of the integers after adding 1 to each element."""
    return sum(value + 1 for value in arr)
a3297841007deec4bf4430bbb18ff6405a7657fa
18,972
def merge(*dicts):
    """Merge the given dictionaries into a single dictionary.

    For overlapping keys, the value from the later dictionary wins.
    """
    merged = dict()
    for mapping in dicts:
        merged.update(mapping)
    return merged
c9a3899407b36357c046bed594d5939bc7aab4b3
18,973
import math


def absolute_error(y, yhat):
    """Return the maximal absolute error between y and yhat.

    :param y: true function values
    :param yhat: predicted function values

    Lower is better.

    >>> absolute_error([0,1,2,3], [0,0,1,1])
    2.0
    """
    return float(max(math.fabs(a - b) for a, b in zip(y, yhat)))
c1262e042d3895a9ba06c213b27a1d5cb23c96fb
18,974
import re


def clean_wsj(txt, remove_fragments=True):
    """
    Prepares WSJ transcripts according to Hannun et al., 2014:
    https://arxiv.org/pdf/1408.2873.pdf
    It is assumed that data has already been processed with Kaldi's s5 recipe.

    A full overview of the wsj transcription guidelines is found here:
    https://catalog.ldc.upenn.edu/docs/LDC93S6A/dot_spec.html
    It is not fully exhaustive which may be due to transcriber variations/mistakes.

    The final character set will be:
    - letters: "a-z"
    - noise-token: "?"
    - apostrophe: "'"
    - hyphen: "-"
    - dot: "."
    - whitespace: " "

    Args:
        txt: Text to be normalized.
        remove_fragments: if True, word fragments (marked with a hyphen) are
            joined or removed, and the remaining hyphens/dots are dropped.

    Returns:
        str: The normalized string.
    """
    txt = txt.lower()

    # Explanations for replacements:
    # - cursive apostrophe [`] should have been ' (very rare)
    # - double tilde [~~] seems to indicate silence during the full clip (very rare)
    # - noise-tag [<noise>] used to indicate noise (can be very subtle breathing between words)
    # - underscore [_] should have been a hyphen (one instance)
    # - pointy brackets [<...>] seems to indicate verbal insertion, but guidelines says deletion
    # - word enclosed in asterisk symbols [*hospital*] indicates mispronunciation, but will be spelled correctly though
    # - semi-colon [;] should have been a . in the abbreviation corp. (one instance)
    # - wrongful spelling of parentheses
    # NOTE: '<noise>' must be replaced before the bare '<'/'>' characters are
    # stripped, or the tag would survive as the word "noise".
    txt = txt.replace("`", "'").replace('~', '').replace('<noise>', '').replace('_', '-')
    txt = txt.replace('<', '').replace('>', '').replace('*', '').replace('corp;', 'corp.')
    txt = txt.replace('in-parenthesis', 'in-parentheses')

    # - word fragment in parenthesis [-(repub)lican] indicates missing fragment
    txt = re.sub("\([a-z'-]+\)", "", txt)

    # Everything in the remove list is vocalized punctuation, however, a few characters also have other uses:
    # - colon associated with word [securi:ty or a:] used to indicate lengthening
    # - prepended exclamation-point [!but] used for emphatic stress
    # These can, however, simply be removed anyway.
    remove = ['"', '(', ')', '{', '}', ',', '&', '/', ';', ':', '!']
    for char in remove:
        txt = txt.replace(char, '')

    # The following is also vocalized punctuation but can not simply be removed, as we sometimes want to keep these:
    # - hyphen/dash [-] when used to compound words and in the beginning/end of word fragments
    # - period [.] when used in acronyms and abbreviations
    # - single-quote ['] when used for contractions and possessive form
    txt = txt.replace('-dash', 'dash').replace('-hyphen', 'hyphen')
    txt = txt.replace('.period', 'period').replace('...ellipsis', 'ellipsis')
    txt = txt.replace("'single-quote", 'single-quote').replace('?question-mark', 'question-mark')

    if remove_fragments:
        # adjacent fragments are joined to one word
        txt = txt.replace('in- -communicado', 'incommunicado')
        txt = txt.replace('republi- -publicans', 'republicans')
        txt = txt.replace('gi- -vestigating', 'investigating')
        txt = txt.replace('exa- -cerbate', 'exacerbate')
        txt = txt.replace('rel- -linquish', 'relinquish')
        txt = txt.replace('cata- -lysmic', 'cataclysmic')
        txt = txt.replace('toro- -ronto', 'toronto')

        # all simple fragments are removed
        txt = re.sub(r"([a-z']+-( |$)|( |^)-[a-z']+)", "", txt)

        # should only be - between verbalized punctuation
        txt = txt.replace('-', '')

        # used in front of letters in acronyms and abbreviations
        txt = txt.replace('.', '')

    # whitespace normalization: convert whitespace sequences to a single whitespace
    txt = re.sub("\s+", " ", txt)

    return txt.strip()
e1e7c4c3f984f4656a17ffddd6c54481083ac094
18,978
def bin_string_to_bytearray(binary_string: str) -> bytearray:
    """Convert a binary string to a bytearray.

    The string is right-padded with '0' bits up to a multiple of 8, so a
    partial trailing byte keeps its bits in the high-order positions.

    Parameters
    ----------
    binary_string: str
        The binary string used to build the bytearray

    Returns
    -------
    bytearray
        The generated bytearray
    """
    # (8 - L) % 8 pads only when the length is not already a multiple of 8.
    padded = binary_string + '0' * ((8 - len(binary_string)) % 8)
    result = bytearray()
    for offset in range(0, len(padded), 8):
        result.append(int(padded[offset:offset + 8], 2))
    return result
5f1af3a46ee97ad23e3d0a6cb9ded9e1e8568a2b
18,981
def truthy_string(s):
    """Determine whether a value's string form is truthy ('true', '1', 'y', 'yes')."""
    truthy_values = {'true', '1', 'y', 'yes'}
    return str(s).lower() in truthy_values
ee670b353de29165bd2922b53a13b0d4b3908d40
18,987
def expectedPrimerPair(l, r):
    """Check if two primers come from the same primer pair.

    Inputs:
        l: left primer Segment
        r: right primer Segment

    The pair identifier is stored at index 2 of each Segment.

    Returns:
        Boolean
    """
    left_pair_id = l[2]
    right_pair_id = r[2]
    return left_pair_id == right_pair_id
7960463d6579c65ff66282a71c3e3d235d01bcf5
18,991
def lookup(obj):
    """Return a list of all the methods and attributes of an object."""
    attributes = dir(obj)
    return attributes
e26bae9a1eeb4460312963526e059268f3a61a35
18,994
def table_key_url(table_name: str, partition_key: str, row_key: str) -> str:
    """Create the URL path for a table/key combination.

    Single quotes inside the keys are escaped by doubling them.
    """
    escaped_partition = partition_key.replace("'", "''")
    escaped_row = row_key.replace("'", "''")
    return (
        f"{table_name}"
        f"(PartitionKey='{escaped_partition}',RowKey='{escaped_row}')"
    )
ce3c4e0639d8b2d78b754fdf6377fd2a693b703e
18,997
def reset_env(env):
    """Resets the pendulum in the safe area."""
    # NOTE(review): assumes a gym Pendulum-style env wrapped once, so the raw
    # environment lives at env.env -- confirm against the wrapper in use.
    env.reset()
    # Overwrite the freshly reset state with a sample from a narrow "safe"
    # region: presumably [angle, angular velocity] with angle in [-0.1, 0.1]
    # and velocity in [-0.5, 0.5] -- verify against the env's state layout.
    env.env.state = env.np_random.uniform(low=[-0.1, -0.5], high=[0.1, 0.5])
    # Clear the cached last action (presumably used for rendering) so it does
    # not reflect the pre-reset episode.
    env.env.last_u = None
    # Return the observation corresponding to the forced state.
    return env.env._get_obs()
55c987fb8bd9011d5fe16e70828bd4acac2b6be6
18,998