Dataset schema: content (string, 39-14.9k chars) · sha1 (string, 40 chars) · id (int64, 0-710k)
def _to_int(parsed):
    """
    Transforms the received parsed value into an integer.

    :param parsed: the parsed value
    :return: an integer created from the value
    """
    if len(parsed) > 0:
        return int(parsed[0])
    else:
        return None
1701648c7e42232c8be0c7451d19ffe43f322118
699,589
def get_model_string(model):
    """
    :param model: model
    :returns: <app_label>.<model_name> string representation for the model
    """
    return "{app_label}.{model_name}".format(
        app_label=model._meta.app_label,
        model_name=model._meta.model_name,
    )
d54323680c4a15cc99bbbcea09cf319c571ff9cf
699,590
def check_overscan(xstart, xsize, total_prescan_pixels=24,
                   total_science_pixels=4096):
    """Check image for bias columns.

    Parameters
    ----------
    xstart : int
        Starting column of the readout in detector coordinates.
    xsize : int
        Number of columns in the readout.
    total_prescan_pixels : int
        Total prescan pixels for a single amplifier on a detector.
        Default is 24 for WFC.
    total_science_pixels : int
        Total science pixels across a detector. Default is 4096 for WFC
        (across two amplifiers).

    Returns
    -------
    hasoverscan : bool
        Indication if there are bias columns in the image.
    leading : int
        Number of bias columns on the A/C amplifiers side of the CCDs
        ("TRIMX1" in ``OSCNTAB``).
    trailing : int
        Number of bias columns on the B/D amplifiers side of the CCDs
        ("TRIMX2" in ``OSCNTAB``).
    """
    hasoverscan = False
    leading = 0
    trailing = 0

    if xstart < total_prescan_pixels:
        hasoverscan = True
        leading = abs(xstart - total_prescan_pixels)

    if (xstart + xsize) > total_science_pixels:
        hasoverscan = True
        trailing = abs(total_science_pixels -
                       (xstart + xsize - total_prescan_pixels))

    return hasoverscan, leading, trailing
934d7683a710e9fe88d488398a9ad9e769470460
699,592
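A quick doctest-style usage sketch for check_overscan above; the readout geometry is hypothetical. A readout starting at column 10 overlaps the 24 leading prescan pixels, and one extending to column 4210 runs past the 4096 science columns:

>>> check_overscan(10, 4200)
(True, 14, 90)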
def vec_reverse(a):
    """
    Reverses a vector

    Parameters
    ----------
    a: list[]
        A vector of scalar values

    Returns
    -------
    list[]
        The reversed vector
    """
    return a[::-1]
b22e13746de1fe8de60d774b1e20f5c5528507ff
699,595
def t04_ValueDNE(C, pks, crypto, server):
    """Checks that values not stored at the server return None."""
    score = 0
    alice = C("alice")
    score += alice.download("a") is None
    score += alice.download("b") is None
    score += alice.download("c") is None
    alice.upload("d", "e")
    score += alice.download("e") is None
    score += alice.download("d") == "e"
    return float(score) / 5.0
af42da60a48d407ad16da6801e847417dbcc69e9
699,596
def get_cell_values(queries, subjects, key=len, attr=None):
    """Generates the values of cells in the binary matrix.

    This function calls some specified key function (def. len) against all
    values of a specified attribute (def. None) from Hits inside Subjects
    which match each query.

    By default, this function will just count all matching Hits (i.e. len()
    is called on all Hits whose query attribute matches). To find maximum
    identities, for example, provide key=max and attr='identity' to this
    function.

    Parameters:
        queries (list): Names of query sequences.
        subjects (list): Subject objects to generate values for.
        key (callable): Some callable that takes a list and produces a value.
        attr (str): A Hit attribute to calculate values with in key function.
    """
    result = [0] * len(queries)
    for index, query in enumerate(queries):
        values = [
            getattr(hit, attr) if attr else hit
            for subject in subjects
            for hit in subject.hits
            if hit.query == query
        ]
        result[index] = key(values)
    return result
4725a4c9f9dc291ff0e8595bb25f51c4612f747d
699,598
def and_(fst, snd):
    """Wraps the logical and function."""
    return fst and snd
c03f2cb9178aa2863e2da58e4fe2741f4a98ea79
699,599
def _partition(sequence, size, count):
    """Partition sequence into count subsequences of size length, and
    a remainder.

    Return (partitions, remainder), where partitions is a sequence of
    count subsequences of cardinality size, and
    apply(append, partitions) + remainder == sequence."""
    partitions = []
    for index in range(0, size * count, size):
        partitions.append(sequence[index:index + size])
    return (partitions, sequence[size * count:])
e33e7eaa35e3c57f0decda50e863686f529d5afa
699,600
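A doctest-style usage sketch for _partition above (hypothetical input): two partitions of size three, the rest returned as the remainder.

>>> _partition(list(range(10)), 3, 2)
([[0, 1, 2], [3, 4, 5]], [6, 7, 8, 9])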
import re


def parse_docket_number(docket_number):
    """ Parse a Common Pleas docket number into its components.

    A docket number has the form "CP-46-CR-1234567-2019"

    This method takes a docket number as a string and returns
    a dictionary with the different parts as keys
    """
    patt = re.compile(
        "(?P<court>[A-Z]{2})-(?P<county>[0-9]{2})-" +
        "(?P<docket_type>[A-Z]{2})-(?P<docket_index>[0-9]{7})-" +
        "(?P<year>[0-9]{4})")
    match = patt.match(docket_number)
    if match is None:
        return None
    else:
        return match.groupdict()
8f9af7709f881cc82bb7d22b420a5361e085de70
699,602
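A doctest-style sketch for parse_docket_number, using the format from its own docstring:

>>> parse_docket_number("CP-46-CR-1234567-2019")
{'court': 'CP', 'county': '46', 'docket_type': 'CR', 'docket_index': '1234567', 'year': '2019'}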
def build_url(param_dict):
    """
    Builds the URL needed to query [University of Wyoming, College of Engineering, Department of Atmospheric Science's website](http://weather.uwyo.edu/upperair/sounding.html)
    to get the proper sounding data.

    Parameters
    ----------
    param_dict : dict
        A dictionary containing the station number, year, month, and
        day/hour of the desired date and location.

    Returns
    -------
    full_url : string
        String of the query URL with the proper date and location of the
        desired atmospheric sounding.
    """
    base_url = 'http://weather.uwyo.edu/cgi-bin/sounding?TYPE=TEXT%3ALIST'
    full_url = base_url
    for key in param_dict.keys():
        full_url += '&' + key + '=' + param_dict[key]
    return full_url
77825ae917650d1b832333920b6363d4dac7c36b
699,604
def in_month(ref_date):
    """
    which month contains a reference date

    :param ref_date: mx.DateTime reference date
    :rtype: range_string e.g. 2007-M12
    """
    return "%4d-M%02d" % (ref_date.year, ref_date.mon)
fe8126529521565e35f2012e359d7a9c57b773d1
699,606
def _precipitable_water(ea, pair):
    """Precipitable water in the atmosphere (Eq. D.3)

    Parameters
    ----------
    ea : ee.Image
        Vapor pressure [kPa].
    pair : ee.Image or ee.Number
        Air pressure [kPa].

    Returns
    -------
    ee.Image or ee.Number
        Precipitable water [mm].

    Notes
    -----
    w = pair * 0.14 * ea + 2.1
    """
    return ea.multiply(pair).multiply(0.14).add(2.1)
a56b3948167fb22bb49b6f84bd3980a2666dd345
699,607
from pathlib import Path


def fullpath(path=""):
    """
    Path: Expand relative paths and tildes.
    """
    return Path.cwd() / Path(path).expanduser()
e7b7d1caa67a2a9988035713ec18347b5581406e
699,611
import io


def read_file(path):
    """Returns all the lines of a file at path as a List"""
    file_lines = []
    with io.open(path, mode="rt", encoding="utf-8") as the_file:
        file_lines = the_file.readlines()
    return file_lines
8e220e0b90ded168a1d1d8d37b7451b7796b1ed5
699,612
import turtle


def new_horse(image_file):
    """(str) -> turtle

    Create a new horse where <image_file> is a valid shapename

    Returns a turtle object
    """
    horse = turtle.Turtle()
    horse.hideturtle()
    horse.shape(image_file)
    return horse
8abd7ea09cfe3340c06250f430ee6f25b03f45aa
699,615
import re


def sort_simulations(df_ts, dyn_dend_order):
    """
    Sorts the simulations in the dataframe according to the order in the
    list dyn_dend_order
    """
    # Create a dictionary with the order of each simulation row in the plot
    dyn_dend_order_dict = {
        dyn_name: dyn_dend_order.index(dyn_name)
        for dyn_name in dyn_dend_order
    }

    # Adding column based on the new order received from clustering
    df_ts['clust_order'] = df_ts['Id'].apply(lambda x: dyn_dend_order_dict[x])

    # Sorting by Ballesteros Id's (helixloop column) and clustering order
    df_ts['helixloop'] = df_ts['Position'].apply(
        lambda x: re.sub(r'^(\d)x', r'\g<1>0x', x))
    df_ts = df_ts.sort_values(["helixloop", 'clust_order'])

    # Drop sort columns once used
    df_ts.drop(['helixloop', 'clust_order'], axis=1, inplace=True)

    return df_ts
68ae24234698803e9f5dd861d7f5255f845401c7
699,616
import csv


def load_penetration(scenario, path):
    """
    Load penetration forecast.
    """
    output = {}

    with open(path, 'r') as source:
        reader = csv.DictReader(source)
        for row in reader:
            if row['scenario'] == scenario.split('_')[0]:
                output[int(row['year'])] = float(row['penetration'])

    return output
23a9730e9d8ec524fe79eaf50c3adb2a1641d002
699,617
def get4Neighbors(img, i, j):
    """
    Get the 4 neighbours for the pixel analysed.

    Parameters: img, image; i, row number; j, column number.
    Returns: neighbors, list of neighbors.
    """
    N, M = img.shape
    neighbors = []
    if i - 1 >= 0:
        neighbors.append(img[i-1][j])
    if j - 1 >= 0:
        neighbors.append(img[i][j-1])
    if j - 1 >= 0 and i - 1 >= 0:
        neighbors.append(img[i-1][j-1])
    if j + 1 < M and i - 1 >= 0:
        neighbors.append(img[i-1][j+1])
    return neighbors
e3765a34ad02b9cf8d1be19f6e0e03db20ea28df
699,618
def input_block(config, section):
    """Return the input block as a string."""
    block = ''
    if section not in config:
        return ''
    for key in config[section]:
        value = config[section][key].strip()
        if value:
            block += '{} {}\n'.format(key, value)
        else:
            block += key + '\n'
    return block
4f6ff5979c99171b390429bc6e90b325986d9f99
699,620
import hashlib


def git_hash_data(data, typ='blob'):
    """Calculate the git-style SHA1 for some data.

    Only supports 'blob' type data at the moment.
    """
    assert typ == 'blob', 'Only support blobs for now'
    return hashlib.sha1(b'blob %d\0%s' % (len(data), data)).hexdigest()
b3195de0c04444a811308e8b2b79b5d136095ea8
699,624
def transpose(table):
    """Returns: copy of table with rows and columns swapped

    Precondition: table is a (non-ragged) 2d List"""
    new_table = []
    n_row = len(table)
    n_col = len(table[0])
    for col in range(n_col):
        each_col = []
        for row in range(n_row):
            each_col.append(table[row][col])
        new_table.append(each_col)
    return new_table
11c74863d8b1941f1a74fb1c054f67b8dd2f22e8
699,628
def getSheetContent(sheet):
    """
    Returns two dimensional array of all non empty content of the given
    sheet. Array contains string and double values.
    """
    cursor = sheet.createCursor()
    cursor.gotoStartOfUsedArea(False)
    cursor.gotoEndOfUsedArea(True)
    return [list(row) for row in cursor.getDataArray()]
9a77c7dadcdf5247f30fa3dc4fa87cc5683cfee2
699,633
def sbas_nav_decode(dwrds: list) -> dict:
    """
    Helper function to decode RXM-SFRBX dwrds for SBAS navigation data.

    :param list dwrds: array of navigation data dwrds
    :return: dict of navdata attributes
    :rtype: dict
    """
    return {"dwrds": dwrds}
215d945ac939a817ae84fa08607b72e0c14a1cc9
699,634
def constant(step, total_train_steps, value=1.0):
    """Constant learning rate (multiplier).

    Args:
        step: a tf.Scalar
        total_train_steps: a number
        value: a number or tf.Scalar

    Returns:
        a tf.Scalar, the learning rate for the step.
    """
    del step, total_train_steps
    return value
53285310764c8d627ae366b2ec8e5ff98339e612
699,635
def set_image(images, row, issues, pre, overrides={}, verbose=False):
    """
    Update an image based on known issues.

    Given an image, image metadata, and a set of known issues, determine
    if any of the known issues apply to the image in question and, if they
    do, make the appropriate edits to the image.

    Parameters
    ----------
    images : dict
        Dict of (SCI, ERR, DQ) np.ndarray images
    row : abscal.common.exposure_data_table.AbscalDataTable
        A single-row table containing metadata on the image
    issues : dict
        A dictionary containing a set of parameters, along with information
        to identify files whose parameters should be adjusted.
    pre : str
        Prefix used for informational output.
    overrides : dict
        A dictionary containing any parameters whose value is being
        overridden by the user.
    verbose : bool
        Whether or not informational output should be printed.

    Returns
    -------
    image : tuple
        Tuple of (SCI, ERR, DQ) np.ndarray images, as edited.
    """
    for issue in issues:
        found = False
        if issue["column"] in row:
            if isinstance(issue["column"], str):
                issue_len = len(issue["value"])
                if issue["value"] == row[issue["column"]][:issue_len]:
                    found = True
            else:
                if issue["value"] == row[issue["column"]]:
                    found = True
        if found:
            if len(issue["x"]) > 1:
                x1, x2 = issue["x"][0], issue["x"][1]
            else:
                x1, x2 = issue["x"][0], issue["x"][0] + 1
            if len(issue["y"]) > 1:
                y1, y2 = issue["y"][0], issue["y"][1]
            else:
                y1, y2 = issue["y"][0], issue["y"][0] + 1
            images[issue["ext"]][y1:y2, x1:x2] = issue["value"]
            if verbose:
                reason = issue["reason"]
                source = issue["source"]
                value = issue["value"]
                msg = "{}: changed ({}:{},{}:{}) to {} because {} from {}"
                print(msg.format(pre, y1, y2, x1, x2, value, reason, source))
    return images
fae11f006dc93abdc0b04dc0aaf09c2ce4642450
699,637
def boyer_moore_preprocessing(pattern, alphabet_size=4):
    """
    Bad character rule used by Boyer-Moore algorithm: for each character x
    in the alphabet, let R(x) be the position of right-most occurrence of
    character x in P. R(x) is defined to be zero if x does not occur in P.
    """
    R = [0] * alphabet_size
    for i in range(len(pattern)):
        R[pattern[i]] = i
    return R
1d70891cfe0f0f55579c7a9349fb24d6954379fd
699,639
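Note that the pattern is expected as a sequence of integer symbol codes in [0, alphabet_size), since each symbol indexes directly into R. A doctest-style sketch with a hypothetical 4-letter encoding (e.g. A=0, C=1, G=2, T=3):

>>> boyer_moore_preprocessing([0, 1, 3, 1, 0])
[4, 3, 0, 2]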
def max_consecutive_sum(array):
    """
    given an array of numbers (positive, negative, or 0)
    return the maximum sum of consecutive numbers
    """
    max_value = max(array)
    running_sum = 0

    for num in array:
        if running_sum < 0:
            running_sum = 0
        running_sum += num
        if running_sum > max_value:
            max_value = running_sum

    return max_value
32e7322f8936f8399ec2ebe0702dbb10332cb529
699,640
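This is Kadane's algorithm; seeding max_value with max(array) keeps the answer correct for all-negative input. A doctest-style sketch (hypothetical values):

>>> max_consecutive_sum([2, -1, 3, -5, 4])
4
>>> max_consecutive_sum([-3, -1, -2])
-1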
def _mask_for_bits(i):
    """Generate a mask to grab `i` bits from an int value."""
    return (1 << i) - 1
53fc285225632cce34a74536a085cfe0af10300a
699,644
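A doctest-style sketch: the mask for i bits is simply i ones in binary.

>>> _mask_for_bits(5)
31
>>> bin(_mask_for_bits(5))
'0b11111'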
import random


def random_int(min_num=1, max_num=200):
    """
    return an int inclusively between min_num and max_num

    :param min_num:
    :param max_num:
    :return: {int} a number
    """
    return random.randint(min_num, max_num)
0c481f889f4a40e8a72a1efa44244e113f395168
699,646
def checkIfDuplicates_2(listOfElems):
    """ Check if given list contains any duplicates """
    setOfElems = set()
    for elem in listOfElems:
        if elem in setOfElems:
            return True
        else:
            setOfElems.add(elem)
    return False
a7d9f322faefa4b0b0191ca96097bbf38c61ee3d
699,649
async def root():
    """
    Default endpoint for testing if the server is running

    :return: Positive JSON Message
    """
    # Return a dict rather than the original set literal, which would not
    # serialize to JSON; the key name is arbitrary.
    return {"message": "MLDatasetTemplate is Running!"}
17fccde4f21561a5166e39ca43df9e88539e0b2e
699,651
def delete(context, key):
    """Delete a key from the current task context."""
    return context.pop(key, None)
0b697ede943653ba41e7c50fff86907f93becee1
699,653
def matches_beginning(prefix: str, allowlist_key: str) -> bool:
    """
    :param prefix: the value of the prefix query parameter
    :param allowlist_key: the key from the allowlist to compare against
    :return: a bool of whether the prefix can be found on the allowlist.
        Both values are stripped of leading `/` before comparison.
    """
    return prefix.lstrip('/').find(allowlist_key.lstrip('/')) == 0
ef047dfe16722d98b8fe894d3c400330a2defd74
699,654
import torch


def create_tau(fval, gradf, d1x, d2x, smoothing_operator=None):
    """
    tau = create_tau( fval, gradf, d1x, d2x )

    In:
        fval: torch.FloatTensor of shape B
        gradf: torch.FloatTensor of shape B*C*H*W
        d1x: torch.FloatTensor of shape B*C*H*W
        d2x: torch.FloatTensor of shape B*C*H*W
        smoothing_operator: function
            A self-adjoint smoothing operator sending torch.FloatTensor of
            shape B*2*H*W to torch.FloatTensor of shape B*2*H*W.

    Out:
        tau: torch.FloatTensor of shape B*H*W*2
    """
    B, C, H, W = gradf.shape

    # Sum over color channels
    alpha1 = torch.sum(gradf * d1x, 1).unsqueeze_(1)
    alpha2 = torch.sum(gradf * d2x, 1).unsqueeze_(1)

    # stack vector field components into shape B*2*H*W
    tau = torch.cat([alpha1, alpha2], 1)

    # Smoothing
    if smoothing_operator:
        tau = smoothing_operator(tau)
        # torch can't sum over multiple axes.
        norm_squared_alpha = (tau**2).sum(1).sum(1).sum(1)
        # In theory, we need to apply the filter a second time.
        tau = smoothing_operator(tau)
    else:
        # torch can't sum over multiple axes.
        norm_squared_alpha = (tau**2).sum(1).sum(1).sum(1)

    scale = -fval / norm_squared_alpha
    tau *= scale.view(B, 1, 1, 1)

    # rearrange for compatibility with compose(), B*2*H*W -> B*H*W*2
    return tau.permute(0, 2, 3, 1).detach()
5e086908e432fbc6a34e1ce72bee84a1f467823e
699,656
def format_seconds(delta):
    """
    Given a time delta object, calculate the total number of seconds and
    return it as a string.
    """
    def _total_seconds(td):
        return (td.microseconds +
                (td.seconds + td.days * 24 * 3600) * 10 ** 6) / 10 ** 6

    return '%s' % (_total_seconds(delta))
60248f96a64b04be6480e27aa134c943461a6daa
699,657
def convert_str_to_list(sequence: str,
                        is_ordered_sequence: bool = True,
                        is_first_term_seq_name: bool = True):
    """
    sequence: A string that contains comma separated numbers
    is_first_term_seq_name: True to drop the first term (i.e. A01255,1,3,5, ...)

    return: A list of integers in a list (String ---> List)
    """
    terms_split: list
    sequence = sequence.strip().strip(",")

    if is_ordered_sequence:
        terms_split = sequence.split(",")
    else:
        terms_split = sequence.split(" ")

    if is_first_term_seq_name:
        del terms_split[0]  # Delete the name of the sequence

    int_list: list = [0] * (len(terms_split))
    for idx in range(0, len(terms_split)):
        int_list[idx] = int(terms_split[idx])

    return int_list
f73213ae3484e00824920722eb58368bb116886b
699,663
def filter_chants_without_notes(chants, logger=None):
    """Exclude all chants without notes"""
    notes_pattern = r'[89abcdefghjklmnopqrs\(\)ABCDEFGHJKLMNOPQRS]+'
    contains_notes = chants.volpiano.str.contains(notes_pattern) == True
    return chants[contains_notes]
98324a2b9c17d975ebfc7860ad9ca38e65db481e
699,666
def correct_eval_poly(d):
    """This function evaluates the polynomial poly at point x.

    Both arguments are carried in the input dictionary: d["poly"] is a list
    of floats containing the coefficients of the polynomial
    (poly[i] -> coefficient of degree i) and d["x"] is the point.

    Parameters
    ----------
    d: dict
        d["poly"] ([float]): Coefficients of the polynomial, where
            poly[i] -> coefficient of degree i
        d["x"] (float): Point

    Returns
    -------
    (dict, float)
        The input dictionary and the value of the polynomial at point x

    Example
    -------
    >>> correct_eval_poly({"poly": [1.0, 1.0], "x": 2})
    ({'poly': [1.0, 1.0], 'x': 2}, 3.0)
    """
    poly, x = d["poly"], d['x']
    result = 0.0
    power = 1
    degree = len(poly) - 1
    i = 0
    while i <= degree:
        result = result + poly[i] * power
        power = power * x
        i = i + 1
    return (d, result)
5a0042c0fb28a5fa4891f86b8b8fa70516fbed34
699,667
import torch


def zeros(shape, dtype=None, device=None):
    """
    Creates a tensor with all elements set to zero.

    Parameters
    ----------
    shape : A list of integers, a tuple of integers, or a 1-D Tensor of
        type int32.
    dtype : The DType of an element in the resulting Tensor

    Returns
    -------
    A Tensor with all elements set to zero.
    """
    if device == 'cpu':
        device = torch.device('cpu')
    elif device == 'gpu':
        device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    return torch.zeros(size=shape, dtype=dtype, device=device)
e8e7b18a1f0d2999152504536709388440c56f1a
699,668
import random


def chapter_uid_generator() -> str:
    """Random number generator for Matroska chapter UIDs."""
    return str(random.choice(range(int(1E18), int(1E19))))
024ea43a93d3e94576324364375542338b859d13
699,669
from datetime import datetime


def generate_header(header: str = "") -> str:
    """
    Generates a file header.

    Args:
        header: A custom header to insert.

    Returns:
        str: The generated header of a file.
    """
    syntax = " Warning generated file ".center(90, "-")
    date = datetime.now().isoformat()
    syntax += "\n"
    syntax += f"Generated at: {date}"
    syntax += "\n"
    syntax += "".center(90, "-")
    return header if header else syntax
0821aef1f5f77dcd7b9fb1bcbcbca74749c3ed4a
699,671
from typing import Callable
import time


def create_timer() -> Callable[[], float]:
    """Create a timer function that returns elapsed time since creation
    of the timer function"""
    start = time.time()

    def elapsed():
        return time.time() - start

    return elapsed
e97fbb8b6fded209d1e5659f548feac726d9cf04
699,681
from typing import Mapping


def recursive_update(old_dict, update_dict):
    """
    Update one embed dictionary with another, similar to dict.update(),
    but recursively update dictionary values that are dictionaries as well.

    based on the answers in
    https://stackoverflow.com/questions/3232943/update-value-of-a-nested-dictionary-of-varying-depth
    """
    for k, v in update_dict.items():
        if isinstance(v, Mapping):
            old_dict[k] = recursive_update(old_dict.get(k, {}), v)
        else:
            old_dict[k] = v
    return old_dict
7f0c4fdca6a58f8416e5f9c2762918fa776d8d9d
699,685
import copy


def merge_dicts(dict1, dict2):
    """Recursively merge two dictionaries.

    Values in dict2 override values in dict1. If dict1 and dict2 contain a
    dictionary as a value, this will call itself recursively to merge these
    dictionaries. This does not modify the input dictionaries (creates an
    internal copy).

    Parameters
    ----------
    dict1: dict
        First dict.
    dict2: dict
        Second dict. Values in dict2 will override values from dict1 in
        case they share the same key.

    Returns
    -------
    return_dict: dict
        Merged dictionaries.
    """
    if not isinstance(dict1, dict):
        raise ValueError(f"Expecting dict1 to be dict, found {type(dict1)}.")
    if not isinstance(dict2, dict):
        raise ValueError(f"Expecting dict2 to be dict, found {type(dict2)}.")

    return_dict = copy.deepcopy(dict1)

    for k, v in dict2.items():
        if k not in dict1:
            return_dict[k] = v
        else:
            if isinstance(v, dict) and isinstance(dict1[k], dict):
                return_dict[k] = merge_dicts(dict1[k], dict2[k])
            else:
                return_dict[k] = dict2[k]

    return return_dict
b3dccd6301be21a096bb3d299793b4cf1461c3d9
699,686
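A doctest-style sketch of the recursive merge and override behavior (hypothetical values):

>>> merge_dicts({"a": 1, "b": {"x": 1}}, {"b": {"y": 2}, "c": 3})
{'a': 1, 'b': {'x': 1, 'y': 2}, 'c': 3}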
def adder_function(args):
    """Dummy function to execute returning a single float."""
    loc, scale = args
    return loc + scale
7646bb1acc324f05c92268b78fefaa75283f812a
699,688
import requests


def get_api_results(url, id):
    """Fetch results from an external API for a given member.

    Args:
        url ([str]): [External API url]
        id ([int]): [member id]

    Returns:
        [json]: [API request response]
    """
    r = requests.get(url.format(id))
    return r.json()
4dc686c616f3ea9124c866b593d44bdc63e54d1d
699,689
def window_optical_flow(vec, window):
    """
    Return pairs of images to generate the optical flow.
    These pairs contain the first and the last image of the optical flow
    according to the size of the window.

    Parameters:
    -----------
    vec : array_like
        sorted list containing the image ids (type int)
    window : int
        size of the window to generate the optical flow

    Returns:
    --------
    pairs : array_like
        list containing tuples with pairs as (first image, last image)
        of the optical flow

    Usage:
    ------
    >>> vec = [0, 1, 2, 3]
    >>> window_optical_flow(vec, 2)
    [(0, 2), (1, 3), (2, 3), (3, 3)]
    """
    pairs = []
    for img in vec:
        last_img = img + window
        if last_img in vec:
            pairs.append((img, last_img))
        else:
            pairs.append((img, vec[-1]))
    return pairs
dde566f6eff6845cb16a1acec95a7bdbc4609b11
699,691
def text_width(text_item):
    """Returns width of Autocad `Text` or `MultiText` object
    """
    bbox_min, bbox_max = text_item.GetBoundingbox()
    return bbox_max[0] - bbox_min[0]
001816ff5937d1e286f00088ede076b86590a951
699,692
from bs4 import BeautifulSoup


def is_html(string):
    """
    Check if string contains html. If html, return true, otherwise,
    return false.
    """
    result = bool(BeautifulSoup(string, 'html.parser').find())
    return result
404723e869608ad7949c144c2f11c0bf629b0262
699,694
import codecs


def get_text(filename: str) -> str:
    """
    Load and return the text of a text file, assuming latin-1 encoding as
    that is what the BBC corpus uses. Use codecs.open() function not open().
    """
    f = codecs.open(filename, encoding='latin-1', mode='r')
    s = f.read()
    f.close()
    return s
43d6036ba8c10946d704dee1cd32b1968de5c199
699,696
def evidence_type_number_to_name(num: int) -> str:
    """
    Transforms evidence type number to its corresponding name

    :param num: The evidence type number (1-based)
    :return: The string name of the evidence type
    """
    supported_types = ['Network', 'Process', 'File', 'Registry', 'Security',
                       'Image', 'DNS']
    # Guard both bounds: the original try/except IndexError let num == 0
    # silently wrap around to the last element via negative indexing.
    if 1 <= num <= len(supported_types):
        return supported_types[num - 1]
    return 'Unknown'
1f6a8e57334e08e997a3f86e629df04cb9602594
699,699
def axis_slicer(n, sl, axis):
    """
    Return an indexing tuple for an array with `n` dimensions, with slice
    `sl` taken on `axis`.
    """
    itup = [slice(None)] * n
    itup[axis] = sl
    return tuple(itup)
0fdd64be34428da20c79d8c52a22c916cb5afe19
699,705
def zzx_degree(f):
    """Returns leading degree of f in Z[x]. """
    return len(f) - 1
f24b966a69c998014a54542d906bbbf62f027126
699,706
def is_visible(self, y):
    """Checks whether a given point is within the currently visible area
    of the markdown area.

    The function is used to handle text which is longer than the specified
    height of the markdown area and during scrolling.

    :param self: MarkdownRenderer
    :param y: y-coordinate
    :return: boolean
    """
    return not self.is_above_area(y) and not self.is_below_area(y)
aa982d8fadf70f970e084ead9be07916d2599217
699,709
import resource


def _IncreaseSoftLimitForResource(resource_name, fallback_value):
    """Sets a new soft limit for the maximum number of open files.

    The soft limit is used for this process (and its children), but the
    hard limit is set by the system and cannot be exceeded.

    We will first try to set the soft limit to the hard limit's value; if
    that fails, we will try to set the soft limit to the fallback_value iff
    this would increase the soft limit.

    Args:
        resource_name: Name of the resource to increase the soft limit for.
        fallback_value: Fallback value to be used if we couldn't set the
            soft value to the hard value (e.g., if the hard value is
            "unlimited").

    Returns:
        Current soft limit for the resource (after any changes we were able
        to make), or -1 if the resource doesn't exist.
    """
    # Get the value of the resource.
    try:
        (soft_limit, hard_limit) = resource.getrlimit(resource_name)
    except (resource.error, ValueError):
        # The resource wasn't present, so we can't do anything here.
        return -1

    # Try to set the value of the soft limit to the value of the hard limit.
    if hard_limit > soft_limit:  # Some OS's report 0 for "unlimited".
        try:
            resource.setrlimit(resource_name, (hard_limit, hard_limit))
            return hard_limit
        except (resource.error, ValueError):
            # We'll ignore this and try the fallback value.
            pass

    # Try to set the value of the soft limit to the fallback value.
    if soft_limit < fallback_value:
        try:
            resource.setrlimit(resource_name, (fallback_value, hard_limit))
            return fallback_value
        except (resource.error, ValueError):
            # We couldn't change the soft limit, so just report the current
            # value of the soft limit.
            return soft_limit
    else:
        return soft_limit
aa71fa41f721a612e44c7ce9b65a025f2e9d1bba
699,710
def get_edges(tree):
    """
    Get edges from url tree.

    Where url tree is tuple of (url, list of tuples( url, list)) etc.
    Example tree: (url, [(url1, [...]), (url2, [...]). ])

    Parameters
    ----------
    tree : tuple
        Tree of urls.

    Returns
    -------
    list
        List of tuples (source page, end page).
    """
    edges = []
    url, elements = tree
    if isinstance(elements, list):
        for element in elements:
            if isinstance(element, str):
                edges.append((url.split("/")[-1], element.split("/")[-1]))
            else:
                edges.append((url.split("/")[-1], element[0].split("/")[-1]))
                edges += get_edges(element)
    return edges
f0e5d591e1453c6b7507c889a51f9c7064f4be39
699,711
def asstring(bitArray):
    """
    Return string representation of bit array

    Parameters:
        bitArray (list): an array of bits

    Returns:
        (string): string form of bitArray
    """
    return ''.join([str(bit) for bit in bitArray])
23df601aa1b66c89428004d2e1e8a5961066c8be
699,713
def is_in_fold_innermost_scope_scope(context):
    """Return True if the current context is within a scope marked @fold."""
    return 'fold_innermost_scope' in context
6bed2406d28ce17c09c6dd293fe61e7f70fbf4b2
699,714
import collections


def coords_assign(coords, dim, new_name, new_val):
    """Reassign an xray.DataArray-style coord at a given dimension.

    Parameters
    ----------
    coords : collections.OrderedDict
        Ordered dictionary of coord name : value pairs.
    dim : int
        Dimension to change (e.g. -1 for last dimension).
    new_name : string
        New name for coordinate key.
    new_val : any
        New value, e.g. numpy array of values

    Returns
    -------
    new_coords : collections.OrderedDict
        Ordered dictionary with altered dimension.

    Example
    -------
    lat = np.arange(89., -89., -1.0)
    lon = np.arange(0., 359., 1.)
    data = np.ones((len(lat), len(lon)), dtype=float)
    coords = coords_init(data)
    coords = coords_assign(coords, -1, 'lon', lon)
    coords = coords_assign(coords, -2, 'lat', lat)
    """
    items = list(coords.items())
    items[dim] = (new_name, new_val)
    new_coords = collections.OrderedDict(items)
    return new_coords
ed5210ec2f5399aa8302eadc53e515bdd6722307
699,715
def add_clusters(data, clusters):
    """
    Adds the cluster predictions to the original data for interpretation.

    :param data: DataFrame. The data to have the cluster predictions added
        on to.
    :param clusters: List. The list of cluster predictions to be added to
        the DataFrame.
    """
    addclusters = data
    addclusters["cluster"] = clusters
    return addclusters
5a91c9af1bccf6ee76d419ceba3274dcecef7535
699,719
def compose_gates(cliff, gatelist):
    """
    Add gates to a Clifford object from a list of gates.

    Args:
        cliff: A Clifford class object.
        gatelist: a list of gates.

    Returns:
        A Clifford class object.
    """
    for op in gatelist:
        split = op.split()
        q1 = int(split[1])
        if split[0] == 'v':
            cliff.v(q1)
        elif split[0] == 'w':
            cliff.w(q1)
        elif split[0] == 'x':
            cliff.x(q1)
        elif split[0] == 'y':
            cliff.y(q1)
        elif split[0] == 'z':
            cliff.z(q1)
        elif split[0] == 'cx':
            cliff.cx(q1, int(split[2]))
        elif split[0] == 'h':
            cliff.h(q1)
        elif split[0] == 's':
            cliff.s(q1)
        elif split[0] == 'sdg':
            cliff.sdg(q1)
        else:
            raise ValueError("Unknown gate type: ", op)
    return cliff
bcbade0ec400b46805f73512a1c2fc64ac866404
699,721
import math


def color_distance(from_color, to_color):
    """
    Calculate the euclidean distance of two colors in 3D space
    """
    return math.sqrt(
        (from_color[0] - to_color[0]) ** 2
        + (from_color[1] - to_color[1]) ** 2
        + (from_color[2] - to_color[2]) ** 2
    )
b72e9101682bd498ed21e8fb9f73b8f52401b888
699,723
def _get_native_location(name):
    # type: (str) -> str
    """
    Fetches the location of a native MacOS library.

    :param name: The name of the library to be loaded.
    :return: The location of the library on a MacOS filesystem.
    """
    return '/System/Library/Frameworks/{0}.framework/{0}'.format(name)
53cb9ac2a771883b791a111e53e23bd77c08f43b
699,728
def ra2float(ra):
    """
    Convert ra to degrees (float).

    ra can be given as a time string, HH:MM:SS.SS, or as a string like
    '25.6554', or (trivially) as a float. An exception is thrown if ra is
    invalid.

    360 deg = 24 hrs, 360/24 = 15
    """
    if ra is None:
        return ra
    if type(ra) is float or type(ra) is int:
        return float(ra)
    if type(ra) is str and ra.find(':') == -1:
        return float(ra)
    try:
        return float(ra)  # catch numpy types
    except Exception:
        pass
    assert type(ra) is str, \
        'Invalid parameter format (ra2float - data type %r)' % type(ra)
    h, m, s = ra.strip().split(':')
    if h.find('-') != -1:
        h = h.replace('-', '')
        sign = -1.0
    else:
        sign = 1.0
    return sign * (float(h) * 15.0 + float(m) / 4.0 + float(s) / 240.0)
d16e1163f9e821fdff4344eacf7c29b08a8ba266
699,729
def split_unknown_args(argv: list[str]) -> tuple[list[str], list[str]]:
    """Separate known command-line arguments from unknown ones.

    Unknown arguments are separated from known arguments by
    the special **--** argument.

    :param argv: command-line arguments
    :return: tuple (known_args, unknown_args)
    """
    for i in range(len(argv)):
        if argv[i] == "--":
            return argv[:i], argv[i + 1:]
    return argv, []
14e6f202e105cb1001563e9a5a5fc1c5f4bd9fd0
699,730
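A doctest-style sketch (hypothetical argv):

>>> split_unknown_args(["run", "-v", "--", "--foo", "bar"])
(['run', '-v'], ['--foo', 'bar'])
>>> split_unknown_args(["run", "-v"])
(['run', '-v'], [])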
def byte_to_megabyte(byte):
    """
    Convert byte value to megabyte
    """
    return byte / (1024.0 ** 2)
1b410bcec539e3946a7b5751b984758f89a7ed96
699,732
import string


def decode(digits, base):
    """Decode given digits in given base to number in base 10.

    digits: str -- string representation of number (in given base)
    base: int -- base of given number
    return: int -- integer representation of number (in base 10)"""
    # Handle up to base 36 [0-9a-z]
    assert 2 <= base <= 36, 'base is out of range: {}'.format(base)

    result = 0
    power = len(digits) - 1
    for i in range(len(digits)):
        if digits[i] in string.ascii_lowercase:
            digit = ord(digits[i]) - 87
        elif digits[i] in string.ascii_uppercase:
            digit = ord(digits[i]) - 55
        else:
            digit = int(digits[i])
        num = (base ** power) * digit
        result += num
        power -= 1
    return result
febdf9973a73de5b3686a20b8c2a5738391a815e
699,734
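A doctest-style sketch; letter digits work in either case ('a' and 'A' both map to 10):

>>> decode('ff', 16)
255
>>> decode('101', 2)
5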
def normalize_variant(variant: str) -> str:
    """
    Normalize variant.

    Reformat the variant, replacing the colon separators with underscores:
    chromosome:position:reference:alternative becomes
    chromosome_position_reference_alternative

    :param variant: string representation of variant
    :return: reformatted variant
    """
    cpra = variant.split(":")
    cpra[0] = "X" if cpra[0] == "23" else cpra[0]
    return "_".join(cpra)
2dc97b7f7b09add6a8062db94376c1ab030ff07c
699,739
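A doctest-style sketch; chromosome 23 is mapped to "X" on the way through:

>>> normalize_variant("23:1000:A:G")
'X_1000_A_G'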
from typing import Tuple


def compute_limits(numdata: int, numblocks: int, blocksize: int,
                   blockn: int) -> Tuple[int, ...]:
    """Generates the limit of indices corresponding to a specific block.

    It takes into account the non-exact divisibility of numdata into
    numblocks, letting the last block take the extra chunk.

    Parameters
    ----------
    numdata : int
        Total number of data points to distribute
    numblocks : int
        Total number of blocks to distribute into
    blocksize : int
        Size of data per block
    blockn : int
        Index of block, from 0 to numblocks-1

    Returns
    -------
    start : int
        Position to start assigning indices
    end : int
        One beyond position to stop assigning indices
    """
    start = blockn * blocksize
    end = start + blocksize
    if blockn == (numblocks - 1):  # last block gets the extra
        end = numdata
    return start, end
748344d60baa8f2ecd31ce822c0e33aca981bc13
699,740
import json


def getToken(response):
    """
    Get the tokenised card reference from the API response

    :param response: Response object in JSON
    :return: String - token
    """
    resp_dict = json.loads(response.text)
    try:
        token = resp_dict["token"]
    except KeyError:
        print('Retrieval unsuccessful.')
        return None
    return token
b849f3b021b995b164b99690a82ecabf881bb18b
699,741
def _read_transition_statistics_from_files(model, verbose):
    """Parses the transitions statistics from the simulation output files
    for later analysis

    Parameters
    ----------
    model : obj
        object containing all anchor and milestone information

    Returns
    -------
    total_steps : int
        total number of MD steps taken in all simulations in all anchors
    """
    total_steps = 0
    for site in model.sites:
        for anchor in site.anchors:
            if anchor.md == True and anchor.directory:
                print('parsing md transitions for:Anchor', anchor.fullname)
                max_steps = anchor._parse_md_transitions()
                print(max_steps, total_steps)
                if max_steps > total_steps:
                    total_steps = max_steps
    return total_steps
1a4f326bd628e6ddd9475c9610b92cb2ba564bba
699,742
def createSubsetGafDict(subset, gafDict):
    """
    Generates a dictionary that maps the subset's Uniprot ACs to the GO IDs,
    based on the provided gene subset and the gaf dictionary.

    Parameters
    ----------
    subset : set of str
        A subset of Uniprot ACs of interest.
    gafDict : dict of str mapping to set
        A dictionary that maps Uniprot ACs (str) to a set GO IDs.
        Generated by importGAF().

    Returns
    -------
    dict of str mapping to set
        A dictionary that maps the subset's Uniprot ACs to GO IDs.
    """
    gafSubsetDict = {gene: gafDict[gene] for gene in subset if gene in gafDict}
    return gafSubsetDict
76e69cd79c984a19254df171403c008405276408
699,743
def shortest_paths(graph, vertex_key):
    """Uses Dijkstra's algorithm to find the shortest path from
    `vertex_key` to all other vertices. If we have no lengths, then each
    edge has length 1.

    :return: `(lengths, prevs)` where `lengths` is a dictionary from key
        to length. A length of -1 means that the vertex is not connected
        to `vertex_key`. `prevs` is a dictionary from key to key, giving
        for each vertex the previous vertex in the path from `vertex_key`
        to that vertex. Working backwards, you can hence construct all
        shortest paths.
    """
    shortest_length = {k: -1 for k in graph.vertices}
    shortest_length[vertex_key] = 0
    candidates = {vertex_key}
    done = set()
    prevs = {vertex_key: vertex_key}
    while len(candidates) > 0:
        next_vertex, min_dist = None, -1
        for v in candidates:
            dist = shortest_length[v]
            if min_dist == -1 or dist < min_dist:
                min_dist = dist
                next_vertex = v
        candidates.discard(next_vertex)
        done.add(next_vertex)
        for v in graph.neighbours(next_vertex):
            edge_index, _ = graph.find_edge(next_vertex, v)
            dist = min_dist + graph.length(edge_index)
            current_dist = shortest_length[v]
            if current_dist == -1 or current_dist > dist:
                shortest_length[v] = dist
                prevs[v] = next_vertex
            if v not in done:
                candidates.add(v)
    return shortest_length, prevs
f2ac9abf9292364099748475988d4ee1dbeb4b23
699,744
def process_overall_mode_choice(mode_choice_data):
    """Processing and reorganizing the data in a dataframe ready for plotting

    Parameters
    ----------
    mode_choice_data: pandas DataFrame
        From the `modeChoice.csv` input file (located in the output
        directory of the simulation)

    Returns
    -------
    mode_choice: pandas DataFrame
        Mode choice data that is ready for plotting.
    """
    mode_choice = mode_choice_data
    # Select the last iteration's row and drop the iteration column
    mode_choice = mode_choice.iloc[-1, :]
    mode_choice = mode_choice.drop(["iterations"])
    # Replace "ride_hail" by "on-demand ride"
    mode_choice.rename({"ride_hail": "on-demand ride"}, inplace=True)
    return mode_choice
870685017d223f8a277265f80eea56e50eedec90
699,745
def nb_year(p0, percent, aug, p):
    """
    Finds the amount of years required for the population to reach a
    desired amount.

    :param p0: integer of starting population.
    :param percent: float of percent increase per year.
    :param aug: integer of new inhabitants.
    :param p: integer of desired population.
    :return: the amount of years to reach the population
    """
    if p0 >= p:
        return 0
    else:
        return 1 + nb_year(p0 + (p0 * percent / 100) + aug, percent, aug, p)
054496347fc8bedca3424143d48d122712dd1363
699,748
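A doctest-style sketch (hypothetical values): a town of 1500, growing 5% a year plus 100 newcomers, takes 15 years to reach 5000.

>>> nb_year(1500, 5, 100, 5000)
15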
def tshark_read(device, capture_file, packet_details=False, filter_str=None,
                timeout=60, rm_file=True):
    """Read the packets via tshark

    :param device: lan or wan...
    :type device: Object
    :param capture_file: Filename in which the packets were captured
    :type capture_file: String
    :param packet_details: output of packet tree (Packet Details)
    :type packet_details: Bool
    :param filter_str: capture filter, ex. 'data.len == 1400'
    :type filter_str: String
    :param timeout: timeout after executing the read command; default is
        60 seconds
    :type timeout: int
    :param rm_file: Flag to remove capture file
    :type rm_file: bool
    """
    command_string = "tshark -r {} ".format(capture_file)
    if packet_details:
        command_string += "-V "
    if filter_str:
        command_string += "{}".format(filter_str)

    device.sendline(command_string)
    device.expect(device.prompt, timeout=timeout)
    output = device.before
    if rm_file:
        device.sudo_sendline("rm %s" % (capture_file))
        device.expect(device.prompt)
    return output
8fc31098e750691a1aa7c27a868abf0d6254adec
699,749
def chunk_size(request):
    """
    Set the chunk size for the source (or None to use the default).
    """
    return request.param
c57269f434790953d475a2791c862d70d204ed86
699,756
from datetime import datetime


def isoformat(dt: datetime) -> str:
    """ISO format datetime object with max precision limited to seconds.

    Args:
        dt: datetime object to be formatted

    Returns:
        ISO 8601 formatted string
    """
    # IMPORTANT should the format be ever changed, be sure to update
    # TIMESTAMP_REGEX as well!
    return dt.isoformat(timespec="seconds")
679ce7aa71ab30e4c78a0953272c17f487714177
699,758
def _recursive_namedtuple_convert(data):
    """
    Recursively converts the named tuples in the given object to dictionaries

    :param data: An object in a named tuple or its children
    :return: The converted object
    """
    if isinstance(data, list):
        # List
        return [_recursive_namedtuple_convert(item) for item in data]
    elif hasattr(data, '_asdict'):
        # Named tuple
        dict_value = dict(data._asdict())
        for key, value in dict_value.items():
            dict_value[key] = _recursive_namedtuple_convert(value)
        return dict_value
    else:
        # Standard object
        return data
292bc249b056c14eb1c700561d366ff4e6e64a10
699,760
def _find_start(score_matrix, align_globally):
    """Return a list of starting points (score, (row, col)).

    Indicating every possible place to start the tracebacks.
    """
    nrows, ncols = len(score_matrix), len(score_matrix[0])
    # In this implementation of the global algorithm, the start will always
    # be the bottom right corner of the matrix.
    if align_globally:
        starts = [(score_matrix[-1][-1], (nrows - 1, ncols - 1))]
    else:
        starts = []
        for row in range(nrows):
            for col in range(ncols):
                score = score_matrix[row][col]
                starts.append((score, (row, col)))
    return starts
361a1ea87ecf9bbef0950521ed0fdcfd70b7b608
699,761
def get_f1_score(precision, recall):
    """
    Calculate and return F1 score

    :param precision: precision score
    :param recall: recall score
    :return: F1 score
    """
    return (2 * (precision * recall)) / (precision + recall)
e94dd20acac443be9856b9dbb43adf2ead2e0ba5
699,762
def bh2u(x: bytes) -> str:
    """
    str with hex representation of a bytes-like object

    >>> x = bytes((1, 2, 10))
    >>> bh2u(x)
    '01020a'
    """
    return x.hex()
8ab7bf9b536d13a1944e014ea83a4302917c2306
699,763
from typing import List
from functools import reduce


def decode(obs: int, spaces: List[int]) -> List[int]:
    """
    Decode an observation from a list of gym.Discrete spaces in a list of
    integers. It assumes that obs has been encoded by using the
    'utils.encode' function.

    :param obs: the encoded observation
    :param spaces: the list of gym.Discrete spaces from where the
        observation is observed.
    :return: the decoded observation.
    """
    result = []
    sizes = spaces[::-1]
    shift = reduce(lambda x, y: x * y, sizes) // sizes[0]
    for size in sizes[1:]:
        r = obs // shift
        result.append(r)
        obs %= shift
        shift //= size
    result.append(obs)
    return result[::-1]
6c3c1348776b7b164cf70a5bfa9da3e8b53a280f
699,765
import re


def duration_to_seconds(duration):
    """
    Convert duration string to seconds

    :param duration: as string (either 00:00 or 00:00:00)
    :return: duration in seconds :class:`int` or None if it's in the
        wrong format
    """
    if not re.match("^\\d\\d:\\d\\d(:\\d\\d)?$", duration):
        return None

    array = duration.split(':')
    if len(array) == 2:
        return int(array[0]) * 60 + int(array[1])
    return int(array[0]) * 3600 + int(array[1]) * 60 + int(array[2])
25340e85fdc2db03eaa65aba60a158f951da389a
699,769
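A doctest-style sketch for both accepted formats:

>>> duration_to_seconds("02:30")
150
>>> duration_to_seconds("01:02:03")
3723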
def flatten_tests(test_classes):
    """
    >>> test_classes = {x: [x] for x in range(5)}
    >>> flatten_tests(test_classes)
    [(0, 0), (1, 1), (2, 2), (3, 3), (4, 4)]

    >>> test_classes = {x: [x + 1, x + 2] for x in range(2)}
    >>> flatten_tests(test_classes)
    [(0, 1), (0, 2), (1, 2), (1, 3)]
    """
    tests = []
    for class_name, test_names in test_classes.items():
        tests += [(class_name, test_name) for test_name in test_names]
    return tests
332b98ab499ff53ba974d51ac862e7015f616e64
699,770
import re


def normalize_whitespace(s: str) -> str:
    """Convert all whitespace (tabs, newlines, etc) into spaces."""
    return re.sub(r"\s+", " ", s, flags=re.MULTILINE)
6d8b65bcdca9838aa0f4d16d158db5d2218cbf24
699,774
def determine_letter(current_score):
    """
    Calculates the letter grade for a given score

    :param current_score: the score to be evaluated
    :return: the letter grade that score falls within
    """
    if current_score >= 90:
        return "A"
    elif current_score >= 80:
        return "B"
    elif current_score >= 70:
        return "C"
    elif current_score >= 60:
        return "D"
    else:
        return "F"
324aaa8e28a0cbc298410ecd83ea4eee6d39a970
699,779
from typing import Tuple
from typing import Dict


def parse_icf(icf_file: str) -> Tuple[Dict, Dict]:
    """Parse ICF linker file.

    ST only provides .icf linker files for many products, so there is a
    need to generate basic GCC compatible .ld files for all products.
    This parses the basic features from the .icf format well enough to
    work for the ST's .icf files that exist in `cmsis_device`

    Args:
        icf_file: .icf linker file read into a string

    Returns:
        (regions, blocks) where
            `regions` is a map from region_name -> (start_hex, end_hex)
            `blocks` is a map from block_name -> {feature_1: val_1,...}

    Raises:
        IndexError if .icf is malformed (at least compared to how ST
        makes them)
    """
    symbols = {}
    regions = {}  # region: (start_addr, end_addr)
    blocks = {}
    for line in icf_file.split('\n'):
        line = line.strip()
        if line == '' or line.startswith('/*') or line.startswith('//'):
            continue
        tokens = line.split()
        if len(tokens) < 2:
            continue
        if tokens[0] == 'define':
            if tokens[1] == 'symbol':
                symbols[tokens[2]] = tokens[4].strip(';')
            elif tokens[1] == 'region':
                regions[tokens[2].split('_')[0]] = (tokens[5],
                                                    tokens[7].strip('];'))
            elif tokens[1] == 'block':
                blocks[tokens[2]] = {
                    tokens[4]: tokens[6].strip(','),
                    tokens[7]: tokens[9]
                }
    parsed_regions = {
        region: (symbols[start] if start in symbols else start,
                 symbols[end] if end in symbols else end)
        for region, (start, end) in regions.items()
    }
    parsed_blocks = {
        name: {k: symbols[v] if v in symbols else v
               for k, v in fields.items()}
        for name, fields in blocks.items()
    }
    return (parsed_regions, parsed_blocks)
ddc1288603d0697bf915eb82a712f210f54efacd
699,780
def normalize_attribute(attr):
    """
    Normalizes the name of an attribute which is spelled in slightly
    different ways in paizo HTMLs
    """
    attr = attr.strip()
    if attr.endswith(':'):
        attr = attr[:-1]  # Remove trailing ':' if any
    if attr == 'Prerequisites':
        attr = 'Prerequisite'  # Normalize
    if attr == 'Note':
        attr = 'Prerequisite'  # Normalize a very special case (Versatile Channeler)
    if attr == 'Benefits':
        attr = 'Benefit'  # Normalize
    if attr == 'Leadership Modifiers':
        attr = 'Benefit'  # Normalize a very special case (Leadership)
    assert attr in ('Prerequisite', 'Benefit', 'Normal', 'Special')
    return attr.lower()
2cb66878547ee8a98c14bf08261f2610def57a37
699,782
def _parseExpectedWords(wordList, defaultSensitivity=80):
    """Parse expected words list.

    This function is used internally by other functions and classes within
    the `transcribe` module.

    Expected words or phrases are usually specified as a list of strings.
    CMU Pocket Sphinx allows for additional 'sensitivity' values for each
    phrase ranging from *0* to *100*. This function will generate two
    lists, the first with just words and another with specified sensitivity
    values. This allows the user to specify sensitivity levels which can be
    ignored if the recognizer engine does not support it.

    Parameters
    ----------
    wordList : list of str
        List of words or phrases. Sensitivity levels for each can be
        specified by putting a value at the end of each string separated
        with a colon `:`. For example, ``'hello:80'`` for 80% sensitivity
        on 'hello'. Values are normalized between *0.0* and *1.0* when
        returned.
    defaultSensitivity : int or float
        Default sensitivity to use if a word does not have one specified
        between 0 and 100%.

    Returns
    -------
    tuple
        Returns list of expected words and list of normalized sensitivities
        for each.

    Examples
    --------
    Specifying expected words to CMU Pocket Sphinx::

        words = ['hello:95', 'bye:50']
        expectedWords, sensitivities = _parseExpectedWords(words)

    """
    defaultSensitivity = defaultSensitivity / 100.  # normalized

    sensitivities = []
    if wordList is not None:
        # sensitivity specified as `word:80`
        wordListTemp = []
        for word in wordList:
            wordAndSense = word.split(':')
            if len(wordAndSense) == 2:  # specified as `word:80`
                word, sensitivity = wordAndSense
                sensitivity = int(sensitivity) / 100.
            else:
                word = wordAndSense[0]
                sensitivity = defaultSensitivity  # default is 80% confidence
            wordListTemp.append(word)
            sensitivities.append(sensitivity)
        wordList = wordListTemp

    return wordList, sensitivities
83512a86ae112de79bd84e1d9ea3ebebcb4cdefd
699,788
def measurement(qreg=int(0), creg=int(0)):
    """Generate QASM that takes a measurement from a qubit and stores it
    in a classical register.

    Args:
        qreg(int): Number of the Qubit to measure. (default 0)
        creg(int): Number of the Classical Register to store the
            measurement to. (default 0)

    Returns:
        str: Generated QASM containing measurement instruction."""
    # Ensure Integer Variables Have Correct Types
    if qreg is not None:
        qreg = int(qreg)
    if creg is not None:
        creg = int(creg)

    # Generate a measurement argument for QASM 2.0.
    meas_str = f'measure q[{str(qreg)}] -> c[{str(creg)}];'

    # Return generated measurement argument.
    return meas_str
9a9a24f390bf0745e7cdfe80bb1893f77161c171
699,794
def adjacent(p):
    """Return the positions adjacent to position p"""
    return (
        (p[0] + 1, p[1], p[2]),
        (p[0] - 1, p[1], p[2]),
        (p[0], p[1] + 1, p[2]),
        (p[0], p[1] - 1, p[2]),
        (p[0], p[1], p[2] + 1),
        (p[0], p[1], p[2] - 1),
    )
988597e0abd150ae60b556e52a217bd8a136707b
699,796
import re


def replace_special_whitespace_chars(text: str) -> str:
    """It's annoying to deal with nonbreaking whitespace chars like u'xa0'
    or other whitespace chars. Let's replace all of them with the standard
    char before doing any other processing."""
    text = re.sub(r"\s", " ", text)
    return text
17be082a827039264cd75fb7459fc31eb7f617dd
699,799
def two_sequences_in_parallel(sequence1, sequence2):
    """
    Demonstrates iterating (looping) through TWO sequences in PARALLEL.
    This particular example assumes that the two sequences are of equal
    length and returns the number of items in sequence2 that are bigger
    than their corresponding item in sequence1.

    For example, if the sequences are:
        [11, 22, 10, 44, 33, 12]
        [55, 10, 30, 30, 30, 30]
    then this function returns 3, since 55 > 11 and 30 > 10 and 30 > 12.
    """
    # ------------------------------------------------------------------
    # The TWO-SEQUENCES-IN-PARALLEL pattern is:
    #
    #    for k in range(len(sequence1)):
    #        ... sequence1[k] ... sequence2[k] ...
    #
    # The above assumes that the sequences are of equal length
    # (or that you just want to do the length of sequence1).
    # ------------------------------------------------------------------
    count = 0
    for k in range(len(sequence1)):
        # Compare sequence2 against sequence1, as the docstring describes
        # (the original comparison was reversed).
        if sequence2[k] > sequence1[k]:
            count = count + 1
    return count
c5dbce5f99d5c2efeee4048ec1451ea63f404fef
699,801
def image_crop(src, x1, y1, x2, y2):
    """
    Crop image from (x1, y1) to (x2, y2).

    Parameters
    ----------
    :param src: Input image in BGR format
    :param x1: Initial coordinates for image cropping
    :param y1: Initial coordinates for image cropping
    :param x2: End coordinates of image cropping
    :param y2: End coordinates of image cropping
    """
    # Rows come first in a NumPy/OpenCV image array, so y slices the first
    # axis (the original slicing had the axes swapped).
    return src[y1:y2, x1:x2]
6ab70dc644d0d7054ea70fadcf7ec0ca381918d8
699,802
def fill_with_gauss(df, w=12):
    """
    Fill missing values in a time series data using gaussian
    """
    return df.fillna(
        df.rolling(window=w, win_type="gaussian", center=True,
                   min_periods=1).mean(std=2)
    )
fdfdedaf7968f617ff98df586b89c30053a6c886
699,806
def swap(heights_list, index01, index02):
    """swap two positions in a list at given indexes

    Args:
        heights_list (list): iterable in which swapping occurs
        index01 (int): index of first element
        index02 (int): index of second element

    Returns:
        list: list with element positions swapped
    """
    heights_list[index01], heights_list[index02] = \
        heights_list[index02], heights_list[index01]
    return heights_list
f7add4a06a79837766b5840840d17c3247b0bcae
699,807
def create_first_n_1_bits_mask(n, k):
    """ Return a binary mask of first n bits of 1, k bits of 0s"""
    if n < 0 or k < 0:
        raise ValueError("n and k cannot be negative number")
    if n == 0:
        return 0
    # (1 << n) - 1 yields exactly n one-bits; the original (2 << n) - 1
    # produced n + 1 of them.
    mask = (1 << n) - 1
    return mask << k
5a9b637a8973f004da2330c8ebb06ea63fd542c3
699,809
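A doctest-style sketch of the corrected mask (n ones followed by k zeros):

>>> bin(create_first_n_1_bits_mask(3, 2))
'0b11100'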
import re


def clean_str(string):
    """
    Strip and replace some special characters.
    """
    msg = str(string)
    msg = msg.replace("\n", " ")
    msg = re.sub(r"\s+", r" ", msg)
    msg = re.sub(r"^\s", r"", msg)
    msg = re.sub(r"\s$", r"", msg)
    return msg
50132d2c56498f4590fcba7837deb791500f3110
699,810
def char_to_ix(chars):
    """
    Make a dictionary that maps a character to an index

    Arguments:
        chars -- list of character set

    Returns:
        dictionary that maps a character to an index
    """
    return {ch: i for i, ch in enumerate(chars)}
8bfc5b99c7f5aef6d88276fe4b3ad005ce9a017e
699,811