content: string (39 to 14.9k chars) · sha1: string (40 chars) · id: int64 (0 to 710k)
def square(root):
    """This function calculates the square of the argument value"""
    return root * root
54049a92a0383c756911a4604161e092d496ce62
7,248
import string
from collections import Counter

def letter_frequency(seq):
    """Returns a dictionary with the frequencies of letters in the sequence"""
    freq = filter(lambda x: x in string.ascii_letters, seq.lower())
    freq = dict(Counter(freq).most_common())
    # Fill in any lowercase letters that never appeared with a count of 0.
    freq.update(dict((x, 0) for x in filter(lambda x: x not in freq, string.ascii_lowercase)))
    return freq
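A quick usage sketch (illustrative, not part of the original row):

counts = letter_frequency("Hello, World!")
print(counts['l'])  # 3
print(counts['z'])  # 0 -- absent letters are filled in with zero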
bcbf61526c395bc8df36bf7b3a7a37b25dbe1aba
7,249
def process_map_input(input_lines):
    """
    Find the dimensions of a map using the lines of input

    :param input_lines: List of string representing the map
    :return: (x: width, y: height) Tuple
    """
    height = len(input_lines) - 1
    width = len(input_lines[0])
    return width, height
30662e1466d6d553ddee797c6252453c6f6f7f47
7,252
from datetime import datetime

def get_es_index_name(project, meta):
    """
    Get the name for the output ES index

    :param project: seqr project identifier
    :param meta: index metadata
    :return: index name
    """
    return '{project}__structural_variants__{sample_type}__grch{genome_version}__{datestamp}'.format(
        project=project,
        sample_type=meta['sampleType'],
        genome_version=meta['genomeVersion'],
        datestamp=datetime.today().strftime('%Y%m%d'),
    ).lower()
fc1245287aed07ddd8d90f33cadc095c22f944a3
7,256
def RegexCheck(re, line_number, line, regex, msg):
    """Searches for |regex| in |line| to check for a particular style
    violation, returning a message like the one below if the regex matches.
    The |regex| must have exactly one capturing group so that the relevant
    part of |line| can be highlighted. If more groups are needed, use
    "(?:...)" to make a non-capturing group.

    Sample message:

    line 6: Use var instead of const.
    const foo = bar();
    ^^^^^
    """
    def _highlight(match):
        """Takes a start position and a length, and produces a row of '^'s to
        highlight the corresponding part of a string.
        """
        return match.start(1) * ' ' + (match.end(1) - match.start(1)) * '^'

    match = re.search(regex, line)
    if match:
        assert len(match.groups()) == 1
        return ' line %d: %s\n%s\n%s' % (line_number, msg, line, _highlight(match))
    return ''
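A hypothetical invocation (illustrative): the first argument is the `re` module itself, as the signature expects.

import re

print(RegexCheck(re, 6, 'const foo = bar();', r'(const) ', 'Use var instead of const.'))
#  line 6: Use var instead of const.
# const foo = bar();
# ^^^^^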
31e979570eb4e0b251f445555f24fadef8e6879d
7,258
from typing import List, Union

def count(obj: Union[int, List]) -> int:
    """Return the number of integers in obj.

    >>> count(27)
    1
    >>> count([4, 1, 8])
    3
    >>> count([4])
    1
    >>> count([])
    0
    >>> count([4, [1, 2, 3], 8])
    5
    >>> count([1, [2, 3], [4, 5, [6, 7], 8]])
    8
    """
    if isinstance(obj, int):
        return 1
    else:
        return sum(count(i) for i in obj)
d982b3096dc9c7776b32bab765e2a17769d128e9
7,263
def _single_quote_string(name: str) -> str:  # pragma: no cover
    """Single quote a string to inject it into f-strings, since backslashes
    cannot appear inside f-string expressions."""
    return f"'{name}'"
39868168862f3bd60d8da6168503cbc51fcbda84
7,264
def convert_str_version_number(version_str):
    """
    Convert the version number string into a tuple of ints for easy comparisons

    :param version_str: str of the version number, e.g. '0.33'
    :returns: tuple of ints representing the version str
    """
    version_numbers = version_str.split('.')
    if len(version_numbers) != 2:
        raise ValueError(f"Version number is malformed: '{version_str}'")
    return tuple(int(part) for part in version_numbers)
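A minimal sanity check (illustrative):

assert convert_str_version_number('0.33') == (0, 33)
# convert_str_version_number('1.2.3') would raise ValueError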
b550b7d07d9b226800de261f792bf0995ac21738
7,265
def create_virtual_cdrom_spec(client_factory, datastore, controller_key,
                              file_path, cdrom_unit_number):
    """Builds spec for the creation of a new Virtual CDROM to the VM."""
    config_spec = client_factory.create('ns0:VirtualDeviceConfigSpec')
    config_spec.operation = "add"

    cdrom = client_factory.create('ns0:VirtualCdrom')
    cdrom_device_backing = client_factory.create('ns0:VirtualCdromIsoBackingInfo')
    cdrom_device_backing.datastore = datastore
    cdrom_device_backing.fileName = file_path

    cdrom.backing = cdrom_device_backing
    cdrom.controllerKey = controller_key
    cdrom.unitNumber = cdrom_unit_number
    cdrom.key = -1

    connectable_spec = client_factory.create('ns0:VirtualDeviceConnectInfo')
    connectable_spec.startConnected = True
    connectable_spec.allowGuestControl = False
    connectable_spec.connected = True

    cdrom.connectable = connectable_spec
    config_spec.device = cdrom
    return config_spec
8f62c70c432c368f0f94b3c1dc48f38d75093b07
7,270
def plot_mean_and_CI(axes, mean, lb, ub, label, freqs, linestyle='-'):
    """
    Plot mean and confidence boundaries.

    Args:
        axes: plt.axes
        mean: np.ndarray
        lb: np.ndarray
        ub: np.ndarray
        label: string
        freqs: list
        linestyle: string

    Returns:
        plt.axes
    """
    axes.fill_between(freqs, ub, lb, alpha=.25)
    axes.plot(freqs, mean, label=label, marker='o', linestyle=linestyle)
    return axes
77b7ecaa6dddae474495c0a65efafbf08717584c
7,273
def overlap_slices(large_array_shape, small_array_shape, position):
    """
    Modified version of `~astropy.nddata.utils.overlap_slices`.

    Get slices for the overlapping part of a small and a large array.

    Given a certain position of the center of the small array, with
    respect to the large array, tuples of slices are returned which can be
    used to extract, add or subtract the small array at the given
    position. This function takes care of the correct behavior at the
    boundaries, where the small array is cut off appropriately.

    Parameters
    ----------
    large_array_shape : tuple
        Shape of the large array.
    small_array_shape : tuple
        Shape of the small array.
    position : tuple
        Position of the small array's center, with respect to the large array.
        Coordinates should be in the same order as the array shape.

    Returns
    -------
    slices_large : tuple of slices
        Slices in all directions for the large array, such that
        ``large_array[slices_large]`` extracts the region of the large array
        that overlaps with the small array.
    slices_small : tuple of slices
        Slices in all directions for the small array, such that
        ``small_array[slices_small]`` extracts the region that is inside the
        large array.
    """
    # Get edge coordinates
    edges_min = [int(pos - small_shape // 2) for (pos, small_shape) in
                 zip(position, small_array_shape)]
    edges_max = [int(pos + (small_shape - small_shape // 2)) for
                 (pos, small_shape) in zip(position, small_array_shape)]

    # Set up slices
    slices_large = tuple(slice(max(0, edge_min), min(large_shape, edge_max))
                         for (edge_min, edge_max, large_shape) in
                         zip(edges_min, edges_max, large_array_shape))
    slices_small = tuple(slice(max(0, -edge_min),
                               min(large_shape - edge_min, edge_max - edge_min))
                         for (edge_min, edge_max, large_shape) in
                         zip(edges_min, edges_max, large_array_shape))

    return slices_large, slices_small
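A small sketch of how the two slice tuples are meant to be used together (illustrative; assumes NumPy is available):

import numpy as np

large = np.zeros((5, 5))
small = np.ones((3, 3))
# Center the small array on the large array's corner; it gets clipped.
sl_large, sl_small = overlap_slices(large.shape, small.shape, (0, 0))
large[sl_large] += small[sl_small]  # only the overlapping 2x2 block is added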
ef86928b3ef619f209247bb72e2e391d14d541c4
7,277
import torch

def create_batch(sentences, params, dico):
    """
    Convert a list of tokenized sentences into a Pytorch batch

    args:
        sentences: list of sentences
        params: attribute params of the loaded model
        dico: dictionary

    returns:
        word_ids: indices of the tokens
        lengths: lengths of each sentence in the batch
    """
    bs = len(sentences)
    slen = max([len(sent) for sent in sentences])
    word_ids = torch.LongTensor(slen, bs).fill_(params.pad_index)
    for i in range(len(sentences)):
        sent = torch.LongTensor([dico.index(w) for w in sentences[i]])
        word_ids[:len(sent), i] = sent
    lengths = torch.LongTensor([len(sent) for sent in sentences])
    return word_ids, lengths
c08430651ea20f633169187f62f7b22e09bbd17e
7,283
from typing import List

def make_pos_array_and_diff(trials: List[dict]) -> List[dict]:
    """
    Parameters
    ----------
    trials : Non-filtered refinement trials

    Returns
    -------
    A list of dictionaries with updated position and difference of neighbours
    """
    # Note: this is a shallow copy, so the trial dicts themselves are
    # updated in place.
    _trials = trials[:]
    for i, c in enumerate(trials):
        x_array = sorted([d['pos'] for d in trials[i]['comb']])
        _trials[i]['pos_array'] = x_array
        _trials[i]['diff_array'] = [x_array[i] - x_array[i + 1]
                                    for i in range(len(x_array) - 1)]
    return _trials
ebdb0a63d9399985d11b06ec28d4032a54b22f89
7,284
def get_ast_field_name(ast):
    """Return the normalized field name for the given AST node."""
    replacements = {
        # We always rewrite the following field names into their proper underlying counterparts.
        '__typename': '@class'
    }
    base_field_name = ast.name.value
    normalized_name = replacements.get(base_field_name, base_field_name)
    return normalized_name
c5cf0acbca963e7dc0d853064a2599b732d6b0d1
7,287
def olivine(piezometer=None):
    """
    Database for olivine piezometers.

    It returns the material parameter, the exponent parameter and a warning
    with the "average" grain size measure to be used.

    Parameters
    ----------
    piezometer : string or None
        the piezometric relation

    References
    ----------
    | Jung and Karato (2001) https://doi.org/10.1016/S0191-8141(01)00005-0
    | Van der Wal et al. (1993) https://doi.org/10.1029/93GL01382

    Assumptions
    -----------
    - The piezometer of Van der Wal (1993) requires entering the linear mean
      apparent grain size in microns calculated from equivalent circular
      diameters (ECD) with no stereological correction. The function will
      convert automatically this value to linear intercept (LI) grain size
      using the De Hoff and Rhines (1968) correction. It is assumed that LI
      was multiplied by 1.5 (correction factor), the final relation is:
      LI = (1.5 / sqrt(4/pi)) * ECD

    - The piezometer of Jung and Karato (2001) requires entering the linear
      mean apparent grain size in microns calculated from equivalent circular
      diameters (ECD) with no stereological correction. The function will
      convert automatically this value to linear intercept (LI) grain size
      using the De Hoff and Rhines (1968) empirical equation. Since LI was
      originally multiplied by 1.5 (correction factor), the final relation is:
      LI = (1.5 / sqrt(4/pi)) * ECD
    """
    if piezometer is None:
        print('Available piezometers:')
        print("'Jung_Karato'")
        print("'VanderWal_wet'")
        print("'Tasaka_wet'")
        return None

    elif piezometer == 'Jung_Karato':
        B, m = 5461.03, 0.85
        warn = 'Ensure that you entered the apparent grain size as the arithmetic mean in linear scale'
        linear_intercepts = True
        correction_factor = 1.5

    elif piezometer == 'VanderWal_wet':
        B, m = 1355.4, 0.75
        warn = 'Ensure that you entered the apparent grain size as the arithmetic mean in linear scale'
        linear_intercepts = True
        correction_factor = 1.5

    elif piezometer == 'Tasaka_wet':
        B, m = 719.7, 0.75
        warn = 'Ensure that you entered the apparent grain size as the arithmetic mean in linear scale'
        linear_intercepts = False
        correction_factor = 1.2

    else:
        olivine()
        raise ValueError('Piezometer name misspelled. Please choose between valid piezometers')

    return B, m, warn, linear_intercepts, correction_factor
387ea9413acdf551abe108ba5ba7dda51e162c51
7,288
import re
import typing
from typing import Tuple

def hex_to_rgb(hex: str, hsl: bool = False) -> Tuple[int, int, int]:
    """Converts a HEX code into RGB or HSL.
    Taken from https://stackoverflow.com/a/62083599/7853533

    Args:
        hex (str): Takes both short as well as long HEX codes.
        hsl (bool): Converts the given HEX code into HSL value if True.

    Returns:
        Tuple[int, int, int]: Tuple of RGB values.

    Raises:
        ValueError: If given value is not a valid HEX code.
    """
    if re.compile(r"#[a-fA-F0-9]{3}(?:[a-fA-F0-9]{3})?$").match(hex):
        div = 255 if hsl else 0
        if len(hex) <= 4:
            rgb = tuple(
                int(int(hex[i] * 2, 16) / div) if div else int(hex[i] * 2, 16)
                for i in (1, 2, 3)
            )
        else:
            rgb = tuple(
                int(int(hex[i: i + 2], 16) / div) if div else int(hex[i: i + 2], 16)
                for i in (1, 3, 5)
            )
        rgb = typing.cast(Tuple[int, int, int], rgb)
        return rgb
    raise ValueError(f"{hex} is not a valid HEX code.")
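Example calls (illustrative):

print(hex_to_rgb("#ff8000"))  # (255, 128, 0)
print(hex_to_rgb("#f80"))     # (255, 136, 0) -- each short-code digit is doubled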
2c912dacfcf6c52c21c94c5d7bb9b9763279245d
7,290
def filter_rows_via_column_matching(data, column, index=0):
    """
    Filter data, keeping rows whose field at position ``index`` appears in
    ``column``.

    It takes parameters:
        data (data in the form of a list of lists)
        column (used as the match criteria for a particular field of the data)
    and optionally:
        index (by default 0, it is the data field used to match against column)

    Note that column can be many iterables, but probably ought to be a set.

    It returns a filtered list of lists.
    """
    return [row for row in data if row[index] in column]
fcd5548677290a34d94c2eb8d5fefcb2bb50f0b4
7,295
import re

def _normalize_name(name: str) -> str:
    """
    Normalizes the given name.
    """
    return re.sub(r"[^a-zA-Z0-9.\-_]", "_", name)
b38a90c05b0a6ec5a26db6d0da85bed2ae802cea
7,296
def filter_claims_by_date(claims_data, from_date, to_date):
    """Return claims falling in the specified date range."""
    return [
        claim for claim in claims_data
        if from_date <= claim.clm_from_dt <= to_date
    ]
d1568d0fd52382bdb3f1f02414f591d5f4da3596
7,297
import json

def encode_json(struct):
    """Encode a structure as JSON bytes."""
    return bytes(json.dumps(struct), "utf-8")
6724c0a687a98230a32fef81b3a4447c12d164fc
7,302
def _float(value):
    """Return env var cast as float."""
    return float(value)
254b1e3a542c5a74153cd58d3f43e86dab964028
7,308
def isjsonclass(class_: type) -> bool:
    """Check if a class is jsonclass.

    Args:
        class_ (type): The class to check.

    Returns:
        bool: True if it's a jsonclass, otherwise False.
    """
    return hasattr(class_, '__is_jsonclass__')
3234cf62beb03aa968888dd8ec3b65f4c5f4cab3
7,312
def needs_min_max_values(mode, buckets):
    """
    Returns True if an encoding mode needs minimum and maximum column values,
    otherwise False.
    """
    return not buckets and mode in ['one-hot', 'one-hot-gaussian', 'one-hot-gaussian-fluent',
                                    'unary', 'unary-gaussian', 'unary-gaussian-fluent']
50ae2b899e5957347061dd59905290506c460093
7,317
import re

def remove_plus_signs(_s: str) -> str:
    """Removes plus signs from string"""
    return re.sub(pattern=r'\+', repl=r'', string=_s)
53cf3117221ce82578a20d75e7eb807c2d41b8fc
7,319
def Intersection(S1x, S1y, D1x, D1y, S2x, S2y, D2x, D2y):
    """
    Find intersection of 2 line segments

    :param S1x: x coordinate of segment 1's start point
    :param S1y: y coordinate of segment 1's start point
    :param D1x: x coordinate of segment 1's end point
    :param D1y: y coordinate of segment 1's end point
    :param S2x: x coordinate of segment 2's start point
    :param S2y: y coordinate of segment 2's start point
    :param D2x: x coordinate of segment 2's end point
    :param D2y: y coordinate of segment 2's end point
    :return: Intersection point [x, y]
    """
    denom = (D1y - S1y) * (S2x - D2x) - (D2y - S2y) * (S1x - D1x)
    if denom == 0:
        # Segments are parallel (or degenerate): no unique intersection.
        return [None, None]
    a = (D1y - S1y) * S1x + (S1x - D1x) * S1y
    b = (D2y - S2y) * S2x + (S2x - D2x) * S2y
    x = ((S2x - D2x) * a - (S1x - D1x) * b) / denom
    y = ((D1y - S1y) * b - (D2y - S2y) * a) / denom
    return [x, y]
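A worked example (illustrative): the two diagonals of a 2x2 square cross at (1, 1).

print(Intersection(0, 0, 2, 2, 0, 2, 2, 0))  # [1.0, 1.0]
print(Intersection(0, 0, 1, 0, 0, 1, 1, 1))  # [None, None] -- parallel segments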
2dba8839ebf24b55fe3c1b7797e53c7e0c3ed72a
7,321
def collect_nodes(points, highlighted_nodes=None, color='#79FF06',
                  highlighted_color='blue', width=200, highlighted_width=400):
    """
    Collects the required nodes into the format we need.

    Parameters
    ----------
    points : [str, str, ...]
        Graph nodes.
    highlighted_nodes : [str, str, ...], optional
        Highlighted graph nodes. Defaults to [].
    color : str, optional
        Colour of ordinary nodes. Defaults to '#79FF06'.
    highlighted_color : str, optional
        Colour of highlighted nodes. Defaults to 'blue'.
    width : int, optional
        Width of ordinary nodes. Defaults to 200.
    highlighted_width : int, optional
        Width of highlighted nodes. Defaults to 400.

    Returns
    -------
    result : [(str, {'color': str, 'width': int}), (str, {'color': str, 'width': int}), ...]
        List of nodes with their parameters.
    """
    # Avoid a mutable default argument.
    if highlighted_nodes is None:
        highlighted_nodes = []
    result = []
    for p in points:
        if p in highlighted_nodes:
            result.append((p, {"color": highlighted_color, "width": highlighted_width}))
        else:
            result.append((p, {"color": color, "width": width}))
    return result
a3a218b5f8c8c25a0f13a4154f12779a84726f9d
7,324
import io

def read_txt(filename, encoding='utf-8'):
    """Text file reader."""
    with io.open(filename, 'r', encoding=encoding) as f:
        return f.read()
2ca0d80bddc49b793e8cbc63c513410154b4d460
7,326
def get_job_type(name):
    """Returns job type based on its name."""
    if 'phase1' in name:
        return 'phase1'
    elif 'phase2' in name:
        return 'phase2'
    elif 'dfg' in name:
        return 'dfg'
    else:
        return 'other'
50db4a7833028b0a0944a4b915d82a8cabf91595
7,327
def is_resnet(name):
    """
    Simply checks if name represents a resnet; by convention, all resnet names
    start with 'resnet'.
    :param name:
    :return:
    """
    name = name.lower()
    return name.startswith('resnet')
6310d849b76a1006c7c2e97405aa9f0ebc53a78b
7,328
def first_existing(d, keys):
    """Returns the value of the first key in keys which exists in d."""
    for key in keys:
        if key in d:
            return d[key]
    return None
eb9f34f1f5adb0a8e44127fe777e35ca8d36dc04
7,336
def VerboseCompleter(unused_self, event_object):
    """Completer function that suggests simple verbose settings."""
    if '-v' in event_object.line:
        return []
    else:
        return ['-v']
e536e221f8f3465f72071d969b11b6623359cf58
7,342
def gauss_to_tesla(gauss):
    """Converts gauss to tesla"""
    return gauss * 1e-4
4f0239432a3436fd5c6cad4ae9747c8849289f34
7,347
def createKey(problemData):
    """
    Creates the key for a given 'problemData' list of number of item types.
    """
    key = ''
    for itData in problemData:
        key += str(itData) + ','
    # Remove the trailing comma
    return key[:-1]
420a4e96dc6442ba2ae18f4bca3d8a10f8a19284
7,348
def get_hashtag_spans(tokens):
    """
    Finds the spans (start, end) of subtokens in a list of tokens

    Args:
        tokens: list[str]

    Returns:
        spans: list[tuple[int]]
    """
    is_part = ["##" in t for t in tokens]
    spans = []
    pos_end = -1
    for pos_start, t in enumerate(is_part):
        if pos_start <= pos_end:
            continue
        if t:
            last_pos = len(is_part[pos_start:]) - 1
            for j, t_end in enumerate(is_part[pos_start:]):
                if not t_end:
                    pos_end = pos_start + j
                    break
                if j == last_pos:
                    pos_end = pos_start + j + 1
            spans.append((pos_start, pos_end))
    return spans
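A short trace on WordPiece-style tokens (illustrative; note that spans start at the first '##' piece):

tokens = ["new", "##york", "##er", "is", "great", "##ly"]
print(get_hashtag_spans(tokens))  # [(1, 3), (5, 6)]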
2e70466370e1171a2d29636d13c908c2e8b8e30e
7,353
def roundToNearest(number, nearest):
    """
    Rounds a decimal number to the closest value, nearest, given

    Arguments:
        number: [float] the number to be rounded
        nearest: [float] the value to be rounded to

    Returns:
        rounded: [float] the rounded number
    """
    A = 1 / nearest
    rounded = round(number * A) / A
    return rounded
5f3974611b529e93ae8157182ff8b7dbc100a234
7,354
def compare_dict_keys(d1, d2):
    """
    Returns [things in d1 not in d2, things in d2 not in d1]
    """
    return [k for k in d1 if k not in d2], [k for k in d2 if k not in d1]
4b68c06d1598e325c5baa5ad8eefaa7af1e82d27
7,356
def project_name(settings_dict):
    """Transform the base module name into a nicer project name

    >>> project_name({'DF_MODULE_NAME': 'my_project'})
    'My Project'

    :param settings_dict:
    :return:
    """
    return " ".join(
        [
            x.capitalize()
            for x in settings_dict["DF_MODULE_NAME"].replace("_", " ").split()
        ]
    )
07411942978ad769d25234f8c7286aaddc365470
7,359
def parse_config_vars(config_vars):
    """Convert string descriptions of config variable assignment into
    something that CrossEnvBuilder understands.

    :param config_vars: An iterable of strings in the form 'FOO=BAR'
    :returns: A dictionary of name:value pairs.
    """
    result = {}
    for val in config_vars:
        try:
            name, value = val.split('=', 1)
        except ValueError:
            raise ValueError("--config-var must be of the form FOO=BAR")
        result[name] = value
    return result
1bc1b96b6a2b0bf8e42fca0bb4ee9601c883b124
7,360
def add_malicious_key(entity, verdict):
    """Return the entity with the additional 'Malicious' key if determined as such by ANYRUN

    Parameters
    ----------
    entity : dict
        File or URL object.
    verdict : dict
        Task analysis verdict for a detonated file or url.

    Returns
    -------
    dict
        The modified entity if it was malicious, otherwise the original entity.
    """
    threat_level_text = verdict.get('threatLevelText', '')
    if threat_level_text.casefold() == 'malicious activity':
        entity['Malicious'] = {
            'Vendor': 'ANYRUN',
            'Description': threat_level_text
        }
    return entity
a20ba12ae04d09047f228a26ef6f39e334225cb3
7,362
from collections import Counter

def count_terms(terms: list) -> dict:
    """
    Count the number of terms

    :param terms: term list
    :return dict_term: The dictionary containing terms and their numbers
    """
    entity_dict = dict(Counter(terms))
    print('There are %s entities in total.\n' % len(entity_dict))
    return entity_dict
77e362894fbbae3d0cec99daea845734d30e8a2d
7,364
def pretty_duration(seconds):
    """Return a pretty duration string

    Parameters
    ----------
    seconds : float
        Duration in seconds

    Examples
    --------
    >>> pretty_duration(2.1e-6)
    '0.00ms'
    >>> pretty_duration(2.1e-5)
    '0.02ms'
    >>> pretty_duration(2.1e-4)
    '0.21ms'
    >>> pretty_duration(2.1e-3)
    '2.1ms'
    >>> pretty_duration(2.1e-2)
    '21ms'
    >>> pretty_duration(2.1e-1)
    '0.21s'
    >>> pretty_duration(2.1)
    '2.10s'
    >>> pretty_duration(12.1)
    '12.1s'
    >>> pretty_duration(22.1)
    '22s'
    >>> pretty_duration(62.1)
    '1:02'
    >>> pretty_duration(621.1)
    '10:21'
    >>> pretty_duration(6217.1)
    '1:43:37'
    """
    milliseconds = seconds * 1000
    if milliseconds < 1:
        return "{:.2f}ms".format(milliseconds)
    elif milliseconds < 10:
        return "{:.1f}ms".format(milliseconds)
    elif milliseconds < 100:
        return "{:.0f}ms".format(milliseconds)
    elif seconds < 10:
        return "{:.2f}s".format(seconds)
    elif seconds < 20:
        return "{:.1f}s".format(seconds)
    elif seconds < 60:
        return "{:.0f}s".format(seconds)
    else:
        minutes = seconds // 60
        seconds = int(seconds - minutes * 60)
        if minutes < 60:
            return "{minutes:.0f}:{seconds:02}".format(**locals())
        else:
            hours = minutes // 60
            minutes = int(minutes - hours * 60)
            return "{hours:.0f}:{minutes:02}:{seconds:02}".format(**locals())
ceec602cb07ab5c27831c4ed9e1cd552c5b9dde8
7,365
def getsize(datadescriptor):
    """Get the size of a data descriptor tuple."""
    if datadescriptor[0] == 'reg':
        size = datadescriptor[1][2]
    elif datadescriptor[0] == 'mem':
        size = datadescriptor[1][1]
    elif datadescriptor[0] == 'heap':
        size = datadescriptor[1][2]
    elif datadescriptor[0] == 'perp':
        size = datadescriptor[1][2]
    elif datadescriptor[0] == 'pmem':
        size = datadescriptor[1][2]
    else:
        return (15, "Not a supported destination type.")
    return (0, size)
feaaa9d0698b58649a55c53ba399a46ba81520b6
7,375
def to_lowercase(word_list):
    """Convert all characters to lowercase from list of tokenized word_list

    Keyword arguments:
        word_list: list of words
    """
    lowercase_word_list = [word.lower() for word in word_list]
    return lowercase_word_list
025e3edaa79723f8656d10a8d52fe16a402644ae
7,379
def _generate_csv_header_line(*, header_names, header_prefix='', header=True, sep=',', newline='\n'):
    """
    Helper function to generate a CSV header line depending on
    the combination of arguments provided.
    """
    if isinstance(header, str):  # user-provided header line
        header_line = header + newline
    elif not (header is None or isinstance(header, bool)):
        raise ValueError(f"Invalid value for argument `header`: {header}")
    elif header:
        header_line = header_prefix + sep.join(header_names) + newline
    else:
        header_line = ""
    return header_line
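Example outputs (illustrative):

print(repr(_generate_csv_header_line(header_names=['a', 'b'], header_prefix='# ')))  # '# a,b\n'
print(repr(_generate_csv_header_line(header_names=['a', 'b'], header=False)))        # ''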
b9a7f32404a432d2662c43f4fe6444241698bf37
7,383
def filter_labeled_genes(genes):
    """Filter genes which already have a label and return number of labels.

    Args:
        genes (dict): dictionary of genes {g_name: gene object}

    Returns:
        dict: dictionary of genes without label {g_name: gene object}
        int: number of distinct labels in the set of labeled genes
    """
    unlabeled_genes = {}
    labels = set()
    for g_name, gene in genes.items():
        if not gene.plot_label:
            unlabeled_genes[g_name] = gene
        else:
            labels.add(gene.plot_labelID)
    num_labels = len(labels)
    return unlabeled_genes, num_labels
4d50580a07ad6825b4c28e7c91780f1964568056
7,384
def get_floss_params(str_floss_options, filename):
    """Helper routine to build the list of commandline parameters to pass to Floss."""
    # First parameter is the name of the Floss "main" routine.
    list_floss_params = ['main']
    # Add the options from app.config
    list_options = str_floss_options.split(",")
    for option in list_options:
        list_floss_params.append(option)
    # Add the filename of the binary file.
    list_floss_params.append(filename)
    return list_floss_params
e637c25d299c8217fef31b85a2610ec46e53d1f3
7,385
def is_leap_year(year: int) -> bool:
    """Whether or not a given year is a leap year.

    If year is divisible by:

    +------+-----------------+------+
    | 4    | 100 but not 400 | 400  |
    +======+=================+======+
    | True | False           | True |
    +------+-----------------+------+

    Args:
        year (int): The year in question

    Returns:
        bool: True if year is a leap year, false otherwise
    """
    def is_div(x: int) -> bool:
        return year % x == 0

    return is_div(4) and ((not is_div(100)) or is_div(400))
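Spot checks of the century rule (illustrative):

assert is_leap_year(2024) and is_leap_year(2000)
assert not is_leap_year(1900)  # divisible by 100 but not by 400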
e4cca9a2b9f0475aadc763fed679eee8b5dddc4a
7,387
def borders(district, unit):
    """Check if a unit borders a district."""
    if district == []:
        return True
    # bool(i) ^ bool(j) keeps only the four orthogonal neighbours
    # (it excludes diagonals and the unit itself).
    neighbour_coords = [(unit.x + i, unit.y + j)
                        for i in [1, 0, -1]
                        for j in [1, 0, -1]
                        if bool(i) ^ bool(j)]
    district_coords = [(d_unit.x, d_unit.y) for d_unit in district]
    return bool([i for i in neighbour_coords if i in district_coords])
d95bf55b54f0df63980236def80610dcdc6cbfeb
7,389
def soft_crossentropy(predicted_logprobs, target_probs):
    """
    Cross-entropy loss capable of handling soft target probabilities.
    """
    return -(target_probs * predicted_logprobs).sum(1).mean(0)
8f6f0168c67cd0b3f432a5c91c7f4069c54de7c8
7,396
def tflops_per_second(flops, dt):
    """
    Computes an effective processing rate in TFLOPS per second.

    TFLOP/S = flops / (dt * 1E12)

    Args:
        flops: Estimated FLOPS in the computation.
        dt: Elapsed time in seconds.

    Returns:
        The estimate.
    """
    return flops / (1E12 * dt)
f244632e1378a69ea55d4a994a9711bd3a2dca2a
7,399
def is_valid(value, cast_fn, expected_data_type, allow_none=False):
    """
    Checks whether a value can be converted using the cast_fn function.

    Args:
        value: Value to be considered
        cast_fn: Function used to determine the validity, should throw an
                 exception if it cannot
        expected_data_type: string name of the expected data
        allow_none: Boolean determining if none is valid

    Returns:
        tuple:
            **valid (boolean)**: whether it could be casted
            **msg (string)**: Msg reporting what happened
    """
    try:
        # Check for a non-none value
        if str(value).lower() != 'none':
            value = cast_fn(value)
            valid = True
            msg = None
        # Handle the error for none when not allowed
        elif not allow_none and str(value).lower() == 'none':
            valid = False
            msg = 'Value cannot be None'
        # Report all good for nones when they are allowed
        else:
            valid = True
            msg = None
    # Report an exception when the casting goes bad.
    except Exception:
        valid = False
        msg = "Expecting {0} received {1}".format(expected_data_type,
                                                  type(value).__name__)
    return valid, msg
ee1a2aca4ba7d437692f5025901f9bf94031434a
7,404
def v_relative(v, met):
    """Estimates the relative air speed which combines the average air speed of
    the space plus the relative air speed caused by the body movement. Vag is
    assumed to be 0 for metabolic rates equal and lower than 1 met and otherwise
    equal to Vag = 0.3 (M - 1) (m/s)

    Parameters
    ----------
    v : float
        air speed measured by the sensor, [m/s]
    met : float
        metabolic rate, [met]

    Returns
    -------
    vr : float
        relative air speed, [m/s]
    """
    if met > 1:
        return round(v + 0.3 * (met - 1), 3)
    else:
        return v
6dceae6ec076dc800d2aa3e80d7d491d94830580
7,407
def fully_qualified_name(entry):
    """
    Calculates the fully qualified name for an entry by walking the path
    to the root node.

    Args:
        entry: a BeautifulSoup Tag corresponding to an <entry ...> XML node,
               or a <clone ...> XML node.

    Raises:
        ValueError: if entry does not correspond to one of the above XML nodes

    Returns:
        A string with the full name, e.g. "android.lens.info.availableApertureSizes"
    """
    filter_tags = ['namespace', 'section']
    parents = [i['name'] for i in entry.parents if i.name in filter_tags]

    if entry.name == 'entry':
        name = entry['name']
    elif entry.name == 'clone':
        name = entry['entry'].split(".")[-1]  # "a.b.c" => "c"
    else:
        raise ValueError("Unsupported tag type '%s' for element '%s'"
                         % (entry.name, entry))

    parents.reverse()
    parents.append(name)
    fqn = ".".join(parents)
    return fqn
68119b640509cd972770f810b80ba1a2ad54f688
7,408
import re

def shorten_int_name(interface_name):
    """
    Returns the Cisco shortened interface name from a full one.
    If the full interface name is invalid, this will return None
    """
    short = None
    # Raw string avoids invalid escape-sequence warnings for \w and \d.
    regex = r"(\w{2}).*?(\d+(?:/\d+)?(?:/\d+)?)"
    match = re.match(regex, interface_name)
    if match is not None:
        short = ""
        for group in match.groups():
            short += group
    return short
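Example (illustrative):

print(shorten_int_name("GigabitEthernet1/0/1"))  # 'Gi1/0/1'
print(shorten_int_name("mgmt"))                  # None -- no trailing digits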
48a6f730c8d3d2f0abaec299385b5d558cf06a00
7,410
import json

def load_json(path: str):
    """
    Load the contents of a json file into a python dictionary
    """
    with open(path) as f:
        content = json.load(f)
    return content
b35ae26ca303347a98ea3dd3ca42370279d19a2a
7,413
def V_tank_Reflux(Reflux_mass, tau, rho_Reflux_20, dzeta_reserve):
    """
    Calculates the tank volume for reflux.

    Parameters
    ----------
    Reflux_mass : float
        The mass flowrate of Reflux, [kg/s]
    tau : float
        The time, [s]
    rho_Reflux_20 : float
        The density of reflux at 20 degrees Celsius, [kg/m**3]
    dzeta_reserve : float
        The coefficient of reserve, [dimensionless]

    Returns
    -------
    V_tank_Reflux : float
        The tank volume for Reflux, [m**3]

    References
    ----------
    &&&&&&&&&&&&
    """
    return Reflux_mass * tau * dzeta_reserve / rho_Reflux_20
3e1adc446bbe2dd936663af895c59222cd000a48
7,419
import torch

def attention_aggregator(embedding_lists, weights, embed_dim=0) -> torch.Tensor:
    """
    Returns a weighted sum of embeddings

    :param embedding_lists: list of n tensors of shape (l, K) embedding tensors (l can vary)
    :param weights: list of n tensors of shape (l,) weights (l can vary, but matches embeddings)
    :param embed_dim: K, used if the embedding_lists is empty (n = 0) to return a (0, K) empty tensor
    :return: weighted sum of embeddings, shape (n, K)
    """
    assert len(embedding_lists) == len(weights), \
        f"aggregation weights different length to embeddings! " \
        f"weights len {len(weights)}, embeds len {len(embedding_lists)}"
    if len(embedding_lists):
        if len(embedding_lists) == 1:
            # (1, K) tensor with the single embedding (i.e. no aggregation necessary)
            return embedding_lists[0]
        aggregated = torch.stack([torch.sum(emb * w.view(-1, 1), dim=0)
                                  for emb, w in zip(embedding_lists, weights)])
        return aggregated  # (n, K) tensor of aggregated embeddings
    else:
        return torch.tensor([]).view(-1, embed_dim)
88fe01d8baea23321593bf88fd522eb0ef379be9
7,422
def _calculate_application_risk(module):
    """
    Function to calculate Software risk due to application type.

    This function uses a similar approach as RL-TR-92-52 for baseline fault
    density estimates. The baseline application is Process Control software.
    Every other application is ranked relative to Process Control using the
    values in RL-TR-92-52, Worksheet 0 for average fault density.

    Baseline (low) application risk (A) is assigned a 1.
    Medium risk is assigned a 2.
    High risk is assigned a 3.

    Application risks are defined as:

    +-------+------------------------------+----------+
    | Index | Application                  | Relative |
    |       |                              | Risk     |
    +-------+------------------------------+----------+
    | 1     | Batch (General)              | Low      |
    +-------+------------------------------+----------+
    | 2     | Event Control                | Low      |
    +-------+------------------------------+----------+
    | 3     | Process Control              | Low      |
    +-------+------------------------------+----------+
    | 4     | Procedure Control            | Medium   |
    +-------+------------------------------+----------+
    | 5     | Navigation                   | High     |
    +-------+------------------------------+----------+
    | 6     | Flight Dynamics              | High     |
    +-------+------------------------------+----------+
    | 7     | Orbital Dynamics             | High     |
    +-------+------------------------------+----------+
    | 8     | Message Processing           | Medium   |
    +-------+------------------------------+----------+
    | 9     | Diagnostics                  | Medium   |
    +-------+------------------------------+----------+
    | 10    | Sensor and Signal Processing | Medium   |
    +-------+------------------------------+----------+
    | 11    | Simulation                   | High     |
    +-------+------------------------------+----------+
    | 12    | Database Management          | Medium   |
    +-------+------------------------------+----------+
    | 13    | Data Acquisition             | Medium   |
    +-------+------------------------------+----------+
    | 14    | Data Presentation            | Medium   |
    +-------+------------------------------+----------+
    | 15    | Decision and Planning Aids   | Medium   |
    +-------+------------------------------+----------+
    | 16    | Pattern and Image Processing | High     |
    +-------+------------------------------+----------+
    | 17    | System Software              | High     |
    +-------+------------------------------+----------+
    | 18    | Development Tools            | High     |
    +-------+------------------------------+----------+

    :param module: the :py:class:`rtk.software.CSCI.Model` or
                   :py:class:`rtk.software.Unit.Model` data model to calculate.
    :return: False if successful or True if an error is encountered.
    :rtype: bool
    """
    if module.application_id == 0:
        module.a_risk = 0.0
    elif module.application_id in [5, 6, 7, 11, 16, 17, 18]:
        module.a_risk = 3.0
    elif module.application_id in [4, 8, 9, 10, 12, 13, 14, 15]:
        module.a_risk = 2.0
    else:
        module.a_risk = 1.0

    return False
703aaf086aecf717be5c13694a8f1dae9f70a86c
7,423
def get_model_field(model, name):
    """
    Gets a field from a Django model.

    :param model: A Django model, this should be the class itself.
    :param name: A Django model's field.
    :return: The field from the model, a subclass of django.db.models.Model
    """
    return model._meta.get_field(name)
e0f692aff82c20c7817d7de5d1fbeec1b69d3a3d
7,427
def inert_masses(m_1, H, z_m, E_1):
    """First stage inert masses.

    Arguments:
        m_1 (scalar): First stage wet mass [units: kilogram].
        H (scalar): Fraction of the recovery vehicle dry mass which is added
            recovery hardware [units: dimensionless].
        z_m (scalar): Fraction of baseline dry mass which is to be recovered
            [units: dimensionless].
        E_1 (scalar): Structural mass ratio w/o reuse hardware
            [units: dimensionless].

    Returns:
        scalar: First stage inert mass
        scalar: Recovery vehicle inert mass
    """
    chi_r = H * z_m / (1 - H)
    m_inert_1 = m_1 * ((1 + chi_r) / (1 + chi_r + (1 - E_1) / E_1))
    m_inert_recov_1 = m_1 * ((z_m + chi_r) / (1 + chi_r + (1 - E_1) / E_1))
    return m_inert_1, m_inert_recov_1
5698fcb36ef1f532cc8bc1dc0c86a25adc5bcab8
7,429
def buildList(pdList, matrix):
    """Takes a list of primary datasets (PDs) and the AlCaRecoMatrix (a
    dictionary) and returns a string with all the AlCaRecos for the selected
    PDs separated by the '+' character without duplicates."""
    alCaRecoList = []
    for pd in pdList:
        alCaRecoList.extend(matrix[pd].split("+"))
    # remove duplicates converting to a set
    alCaRecoList = set(alCaRecoList)
    stringList = ''
    for alCaReco in alCaRecoList:
        if stringList == '':
            stringList += alCaReco
        else:
            stringList += '+' + alCaReco
    return stringList
7e9351f115aac1064068e16f12276ed5506217e4
7,431
import re

def _get_ip_addr_num(file_path):
    """Get the next IPADDR index num to use for adding an ip addr to an
    ifcfg file.
    """
    num = ''
    with open(file_path, 'r') as f:
        data = f.read()
    data = data.splitlines()
    for line in data:
        found = re.search(r'IPADDR(\d?)=', line)
        if found:
            if found.group(1) == '':
                num = '0'  # keep the return type consistently a string
            else:
                num = str(int(found.group(1)) + 1)
    return num
09dfd6bc8a9da240d3044bd6f5b974c69cbebf76
7,433
def manifest_to_file_list(manifest_fn):
    """
    Open a manifest file and read it into a list. Entries in the list are
    relative, i.e. no leading slash.

    manifest_fn -- the manifest file to read
    """
    image_manifest_list = []
    with open(manifest_fn) as image:
        image_manifest_list = [x[1:] for x in image.read().splitlines()]
    return image_manifest_list
982f02e0b00fad20af8d50d44673d65d9bba5a37
7,438
import dis

def _opcode(name):
    """Return the opcode by name from the dis module."""
    return dis.opmap[name]
d2c8612138c94da68adcc1b8979395987090157c
7,444
import re

def remove_citation(paragraph: str) -> str:
    """Remove all citations (numbers inside square brackets) in paragraph"""
    return re.sub(r'\[\d+\]', '', paragraph)
dc88606e69187143d767215ddc098affdbd185d5
7,445
def job_metadata_filename(metadata):
    """Construct relative filename to job metadata."""
    return "data/{metadata}".format(metadata=metadata)
bb5e8dc6c0ec50fed6801b9c67f8234d9115372a
7,447
def get_db_cols(cur, table_name, schema='public', type_map=True):
    """
    Gets the column names of a given table.
    If type_map is true, returns also a dictionary mapping each column name
    to the corresponding postgres column type.
    """
    db_cols_sql = """SELECT column_name, data_type
                     FROM information_schema.columns
                     WHERE table_schema = '{}'
                     AND table_name = '{}';
                  """.format(schema, table_name)
    cur.execute(db_cols_sql)
    res_rows = [row for row in cur][1:]
    cols = [row[0] for row in res_rows]
    if type_map:
        return cols, dict(res_rows)
    return cols
936952ea0bbc0c165f089e700828ea876d30ec16
7,448
def split_indexes(indexes):
    """Split an index list like [1, 2, 5] into contiguous parts [1, 2] and [5]."""
    left, right = [indexes[0], ], []
    left_now = True
    for i in range(1, len(indexes)):
        prev = indexes[i - 1]
        curr = indexes[i]
        if curr > prev + 1 and left_now:
            left_now = False
        if left_now:
            left.append(curr)
        else:
            right.append(curr)
    return left, right
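Example (illustrative): the split happens at the first gap.

print(split_indexes([1, 2, 5]))  # ([1, 2], [5])
print(split_indexes([1, 2, 3]))  # ([1, 2, 3], [])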
1bdb3b57226737280b83dbdfa3226dc344eb47c0
7,455
def compute_perc_id(aln):
    """Compute percent identity of aligned region on read"""
    length = len(aln.query_alignment_sequence)
    edit = dict(aln.tags)['NM']
    return 100 * (length - edit) / float(length)
7bc172649a452fc0c26d4e40d3240d709fb76534
7,457
def get_name_from_filename(filename):
    """Gets the partition and name from a filename"""
    partition = filename.split('_', 1)[0]
    name = filename.split('_', 1)[1][:-4]  # strip the 4-character extension, e.g. ".txt"
    return partition, name
606cfcc998c4a8405c9ea84b95b2c63f683dd114
7,459
import torch

def _set_device(disable_cuda=False):
    """Set device to CPU or GPU.

    Parameters
    ----------
    disable_cuda : bool (default=False)
        Whether to use CPU instead of GPU.

    Returns
    -------
    device : torch.device object
        Device to use (CPU or GPU).
    """
    # XXX we might also want to use CUDA_VISIBLE_DEVICES if it is set
    if not disable_cuda and torch.cuda.is_available():
        device = torch.device('cuda:0')
    else:
        device = torch.device('cpu')
    return device
1d7d448dd4e4a844201b73c8da4939009e70eb5f
7,462
import json
from pathlib import Path

import yaml

def print_results_from_evaluation_dirs(work_dir_path: Path, run_numbers: list,
                                       print_results_only: bool = False) -> None:
    """Print the aggregated results from multiple evaluation runs."""
    def float_representer(dumper, value):
        text = '{0:.4f}'.format(value)
        return dumper.represent_scalar(u'tag:yaml.org,2002:float', text)

    yaml.add_representer(float, float_representer)

    for run_number in run_numbers:
        eval_dir_path = work_dir_path / f'evaluation_{run_number}'
        eval_file_name = f'evaluation_results_{run_number}.json'
        print(f'--- Evaluation summary run {run_number} ---')
        with open(eval_dir_path / eval_file_name, 'r') as infile:
            results = json.load(infile)
            test_set_name = results['test_set_name']
            if print_results_only:
                # Keep only the keys that hold results.
                results = {key: val for key, val in results.items() if 'result' in key}
                results['test_set_name'] = test_set_name
            print(yaml.dump(results))
4be2d893da5f321390c4b49cd4283c0b6f98b4d5
7,463
def center_text(baseline, text):
    """Return a string with the centered text over a baseline"""
    gap = len(baseline) - (len(text) + 2)
    a1 = int(gap / 2)
    a2 = gap - a1
    return '{} {} {}'.format(baseline[:a1], text, baseline[-a2:])
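Example (illustrative):

print(center_text('-' * 20, 'hi'))  # '-------- hi --------'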
c5683198cf1f28a38d307555943253bd71fe76de
7,466
def _compute_teleport_distribution_from_ratings(user_rating, all_movies):
    """
    Returns the teleporting distribution as explained in the homework.
    If a movie M has been rated, its probability is RATE_M / SUM_OF_ALL_RATINGS;
    otherwise its probability is 0.

    :param user_rating: a dict of (movie_id, rating)
    :param all_movies: a set of movie ids, either rated or not. It is used to
        find the movies that have no rating, whose probability will be set to 0.
    :return: the teleport distribution, a dict of (movie_id, probability)
    """
    distribution = {}
    rating_sum = sum(user_rating.values())
    for movie_id, rating in user_rating.items():
        distribution[movie_id] = rating / rating_sum
    for not_rated_movie in filter(lambda x: x not in distribution, all_movies):
        distribution[not_rated_movie] = 0
    return distribution
7a88cf8a69c9fafc70e14d9337f0af25829bfb20
7,471
import ntpath

def path_base_and_leaf(path):
    """
    Splits path to a base part and a file or directory name, as in the
    following example: path: '/a/b'; base: '/a'; leaf: 'b'
    """
    head, tail = ntpath.split(path)
    if not tail:  # in case there is a trailing slash at the end of path
        return {'base': ntpath.split(head)[0], 'leaf': ntpath.basename(head)}
    return {'base': head, 'leaf': tail}
956daa06f87cc60c8e304fa129fb86e49c4776ce
7,472
import re
import zipfile

def instance_name_from_zip(path):
    """Determines the instance filename within a SEC EDGAR zip archive."""
    re_instance_name = re.compile(r'.+-\d{8}\.xml')
    for name in zipfile.ZipFile(path).namelist():
        if re_instance_name.fullmatch(name):
            return name
    raise RuntimeError('Zip archive does not contain a valid SEC instance file.')
59b2154d433e500e9b0cdf39ee70d4c058da1d06
7,475
import re

def has_number(name):
    """Check whether the name contains a digit (including Chinese numerals)."""
    if bool(re.search(r'\d', name)):
        return True
    num_str = ['一', '二', '三', '四', '五', '六', '七', '八', '九', '十']
    for s in num_str:
        if s in name:
            return True
    return False
56dec9664e945d852cbfee4791f386aaab15f215
7,478
def cohort_to_int(year, season, base=16):
    """cohort_to_int(year, season[, base])

    Converts cohort tuple to a unique sequential ID.

    Positional arguments:
        year (int) - 2-digit year
        season (int) - season ID

    Keyword arguments:
        base (int) - base year to treat as 0

    Returns:
        (int) - integer representing the number of seasons since the
                beginning of the base year
    """
    return 3 * (year - base) + season
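Example with the default base year of 16 (illustrative):

print(cohort_to_int(17, 2))  # 3 * (17 - 16) + 2 == 5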
1f1981eb6c43ab6f77abf6d04ba3b92d9053953d
7,479
def dict_of_transition_matrix(mat):
    """
    Convert a transition matrix (list of list or numpy array) to a dictionary
    mapping (state, state) to probabilities (as used by :class:`pykov.Chain`).
    """
    if isinstance(mat, list):
        return {(i, j): mat[i][j] for i in range(len(mat)) for j in range(len(mat[i]))}
    else:
        return {(i, j): mat[i, j] for i in range(len(mat)) for j in range(len(mat[i]))}
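Example (illustrative):

mat = [[0.9, 0.1], [0.5, 0.5]]
print(dict_of_transition_matrix(mat))
# {(0, 0): 0.9, (0, 1): 0.1, (1, 0): 0.5, (1, 1): 0.5}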
b823ff496a751f4ffe305a31f1c1d019f7a25d33
7,481
import hashlib
from typing import Tuple

def hashfile(path: str, blocksize: int = 65536) -> Tuple[str, str]:
    """Calculate the MD5 hash of a given file

    Args:
        path (str, os.path): Path to the file to generate a hash for
        blocksize (int, optional): Memory size to read in the file
            Default: 65536

    Returns:
        hash (str): The HEX digest hash of the given file
        path (str): The filepath that generated the hash
    """
    # Instantiate the hashlib module with md5
    hasher = hashlib.md5()

    # Open the file and read it in blocks, updating the hash as we go
    with open(path, "rb") as f:
        buf = f.read(blocksize)
        while len(buf) > 0:
            hasher.update(buf)       # Update the hash
            buf = f.read(blocksize)  # Update the buffer

    return hasher.hexdigest(), path
e38e6622534f27bed109a2e2b71373503ca4e7b0
7,483
import pathlib

def package_data() -> pathlib.Path:
    """
    Returns the absolute path to the circe/data directory.
    """
    return pathlib.Path(__file__).parents[1].joinpath("data")
19d8fa28ba872f8633e6efddb310d30264d831e6
7,484
import re

def changeFileNoInFilePath(path: str, fileNo: int) -> str:
    """Replaces the number in the path with the given number."""
    separator = r"[0-9]+\."
    splitted_path = re.split(separator, path, 1)
    new_path = splitted_path[0] + str(fileNo) + "." + splitted_path[1]
    return new_path
070fbe30d2937b57ef601fb764cf68ec219b9c95
7,485
import json

def load_json(path):
    """Load json from file"""
    with open(path) as f:
        json_object = json.load(f)
    return json_object
17db7327b6dac16aaeaff2354f828646eff695b2
7,491
def _bin_labels_to_segments(bin_labels: list) -> list[tuple]:
    """
    Convert bin labels (time-axis list data) to segment data

    >>> _bin_labels_to_segments(['female'] * 5 + ['male'] * 10 + ['noise'] * 5)
    [('female', 0, 5), ('male', 5, 15), ('noise', 15, 20)]
    """
    if len(bin_labels) == 0:
        return []
    current_label = None
    segment_start = -1
    ret = []
    i = 0
    for i, e in enumerate(bin_labels):
        if e != current_label:
            if current_label is not None:
                ret.append((current_label, segment_start, i))
            current_label = e
            segment_start = i
    ret.append((current_label, segment_start, i + 1))
    return ret
6b0eafdaf6affee33a3b655ba8ae7aebf2b38746
7,493
def build_efficiencies(efficiencies, species_names, default_efficiency=1.0):
    """Creates line with list of third-body species efficiencies.

    Parameters
    ----------
    efficiencies : dict
        Dictionary of species efficiencies
    species_names : list of str
        List of all species names
    default_efficiency : float, optional
        Default efficiency for all species; will be 0.0 for reactions with
        explicit third body

    Returns
    -------
    str
        Line with list of efficiencies
    """
    # Reactions with a default_efficiency of 0 and a single entry in the
    # efficiencies dict have an explicit third body specified.
    if len(efficiencies) == 1 and not default_efficiency:
        return ''

    reduced_efficiencies = {s: efficiencies[s] for s in efficiencies if s in species_names}
    return ' '.join([f'{s}:{v}' for s, v in reduced_efficiencies.items()])
a8f8912cd290b86697c67465b4aed18220a8c889
7,494
def _inverse_lookup(dictionary, value):
    """Does an inverse lookup of key from value"""
    return [key for key in dictionary if dictionary[key] == value]
4ad34b27fbc35b3bae95bcb8442d1a2f7df94e9f
7,496
def checkdeplaid(incidence):
    """
    Given an incidence angle, select the appropriate deplaid method.

    Parameters
    ----------
    incidence : float
        incidence angle extracted from the campt results.
    """
    if incidence >= 95 and incidence <= 180:
        return 'night'
    elif incidence >= 90 and incidence < 95:
        return 'night'
    elif incidence >= 85 and incidence < 90:
        return 'day'
    elif incidence >= 0 and incidence < 85:
        return 'day'
    else:
        return False
806ef360e7b5b3d7138d88be2f83267e7668d71e
7,501
def determine_high_cor_pair(correlation_row, sorted_correlation_pairs):
    """Select highest correlated variable given a correlation row with columns:
    ["pair_a", "pair_b", "correlation"]. For use in a pandas.apply().

    Parameters
    ----------
    correlation_row : pandas.core.series.Series
        Pandas series of the specific feature in the pairwise_df
    sorted_correlation_pairs : pandas.DataFrame.index
        A sorted object by total correlative sum to all other features

    Returns
    -------
    The feature that has a lower total correlation sum with all other features
    """
    pair_a = correlation_row["pair_a"]
    pair_b = correlation_row["pair_b"]

    if sorted_correlation_pairs.get_loc(pair_a) > sorted_correlation_pairs.get_loc(pair_b):
        return pair_a
    else:
        return pair_b
36eccfe0ffb0ac43caf49fe4db8c35c58d0fa29c
7,503
def create_nonlocal_gateway_cluster_name(namespace: str) -> str:
    """Create the cluster name for the non-local namespace that uses a gateway."""
    return "remote-{0}-gateway".format(namespace)
9ca9758a7ee68ede6e57a7f50f2d772b45ee844b
7,507
def get_subset(container, subset_bounds):
    """Returns a subset of the given list with respect to the list of bounds"""
    subset = []
    for bound in subset_bounds:
        subset += container[bound[0]: bound[1]]
    return subset
4932ecba987c4936f9f467f270c6c07fd8681840
7,511
def yesnoquery(message):
    """
    Displays `message` and waits for user Y/N input. Returns Boolean where
    true means Y.
    """
    useryn = None
    while useryn is None:
        if not isinstance(message, str):
            raise ValueError("Must pass a valid string to query")
        useryn = input(message).lower()
        if useryn != "y" and useryn != "n":
            print("Must enter either a 'Y' or 'N'", useryn)
            useryn = None
    if useryn == "y":
        return True
    elif useryn == "n":
        return False
    else:
        return -1  # unreachable: the loop above only exits on 'y' or 'n'
87ec3cb01e4a2e52ce1cd900e5446cbab9a05373
7,518
def solution(X, A):
    """Find the earliest time that a frog can jump to position X.

    In order to reach X, a leaf must be present at every position from 1 to X.

    Args:
        X (int): The position that the frog must reach.
        A (list): A list of integers from 1 to X, where A[k] represents a leaf
            falling at minute k into position A[k].

    Returns:
        int: The number of minutes that the frog must wait.

    Complexity:
        Time: O(N)
        Space: O(X)
    """
    counter = [False] * X
    total = 0
    for i, val in enumerate(A):
        if (val < 1) or (val > X):
            raise ValueError
        if not counter[val - 1]:
            counter[val - 1] = True
            total += 1
            if total == X:
                return i
    return -1
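The classic example for this task (illustrative): the frog can cross at minute 6, once positions 1 through 5 are all covered.

print(solution(5, [1, 3, 1, 4, 2, 3, 5, 4]))  # 6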
d1fec5a3ec4c6dc06cd0feab295c90cb4c920ced
7,524
import math

def moments_get_orientation(m):
    """Returns the orientation in radians from moments.

    Theta is the angle of the principal axis nearest to the X axis
    and is in the range -pi/4 <= theta <= pi/4. [1]

    1. Simon Xinmeng Liao. Image analysis by moments. (1993).
    """
    theta = 0.5 * math.atan((2 * m['mu11']) / (m['mu20'] - m['mu02']))
    return theta
0b75e86e324dccd5fe2c4332dfeb15d63a417b9b
7,525
def get_article_case(article, word):
    """Determines the correct article casing based on the word casing"""
    return article.capitalize() if word[0].istitle() else article
603810cb60c3719c102afe024cdc0ce474d37bfa
7,526
import collections

def deep_convert_to_plain_dict(an_odict):
    """
    Recursively convert `an_odict` and any of its dictionary subelements from
    `collections.OrderedDict`:py:class: to plain `dict`:py:class:

    .. note:: This is naive, in that it will not properly handle dictionaries
        with recursive object references.

    :Args:
        an_odict
            a (presumably) `collections.OrderedDict`:py:class: to convert

    :Returns:
        an "unordered" (i.e., plain) `dict`:py:class: with all ordered
        dictionaries converted to `dict`:py:class:
    """
    a_dict = {}
    for (key, value) in an_odict.items():
        if type(value) is collections.OrderedDict:
            a_dict[key] = deep_convert_to_plain_dict(value)
        else:
            a_dict[key] = value
    return a_dict
0a463981909153d4beee64fbbf5fad489adf78ac
7,527
def steps(number):
    """
    Count steps needed to get to 1 from provided number.

    :param number int - the number provided.
    :return int - the number of steps taken to reach 1.
    """
    if number < 1:
        raise ValueError("Provided number is less than 1.")
    steps = 0
    while number != 1:
        steps += 1
        if number % 2 == 0:  # even: halve it
            number = number // 2
        else:  # odd: 3n + 1
            number = number * 3 + 1
    return steps
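Example (illustrative): 12 -> 6 -> 3 -> 10 -> 5 -> 16 -> 8 -> 4 -> 2 -> 1.

print(steps(12))  # 9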
8681691946b5ba2d261a1ae753d2a04c46ac1719
7,533
import re

def replace_space(string):
    """Replace all spaces in a word with `%20`."""
    return re.sub(' ', '%20', string)
9b9b400f913efb7ee1e86a87335955744c9b1a3a
7,536
import re

def port_to_string(port):
    """
    Returns clear number string containing port number.

    :param port: port in integer (1234) or string ("1234/tcp") representation.
    :return: port number as number string ("1234")
    """
    port_type = type(port)
    if port_type is int:
        return str(port)
    if port_type is str:
        return re.findall(r"\d+", port)[0]
5d526406566c7c37af223ed8e5744ba13771f55f
7,537
def merge(line):
    """
    Helper function that merges a single row or column in 2048
    """
    result = list(line)
    head = 0  # index of the last merged/placed tile
    i = 1
    while i < len(result):
        if result[i] != 0:
            if result[head] == result[i]:
                # Equal tiles: merge them into the head position.
                result[head] += result[i]
                result[i] = 0
                head += 1
            elif result[head] == 0:
                # Slide the tile into the empty head slot.
                result[head] = result[i]
                result[i] = 0
            else:
                # Different tiles: place this one right after head.
                tmp = result[i]
                result[i] = 0
                result[head + 1] = tmp
                head += 1
        i += 1
    return result
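Example merges for a left move (illustrative):

print(merge([2, 0, 2, 4]))  # [4, 4, 0, 0]
print(merge([8, 8, 4, 4]))  # [16, 8, 0, 0]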
1a843ef4dc9c1b6cf20036f24288cb6166ae207b
7,539
def get_transcripts_from_tree(chrom, start, stop, cds_tree):
    """Uses cds tree to obtain transcript IDs from genomic coordinates

    chrom: (String) Specify chrom to use for transcript search.
    start: (Int) Specify start position to use for transcript search.
    stop: (Int) Specify ending position to use for transcript search
    cds_tree: (Dict) dictionary of IntervalTree() objects containing
        transcript IDs as function of exon coords indexed by chr/contig ID.

    Return value: (list) a list of matching unique transcript IDs.
    """
    transcript_ids = set()
    # Interval coordinates are inclusive of start, exclusive of stop
    if chrom not in cds_tree:
        return []
    cds = list(cds_tree[chrom].overlap(start, stop))
    for cd in cds:
        transcript_ids.add(cd.data)
    return list(transcript_ids)
51aa6f1aa97d2f977840376ea2c4bf422ff8e7a6
7,540