def sort_kv_pairs_by_value(d):
    """Turn a dict into a list of key-value pairs, sorted by value in descending order."""
    return [
        (k, v) for v, k in sorted([(v, k) for k, v in d.items()], reverse=True)
    ]
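A quick usage sketch for the function above (the input values are invented for illustration):

scores = {'alice': 3, 'bob': 10, 'carol': 7}
print(sort_kv_pairs_by_value(scores))
# [('bob', 10), ('carol', 7), ('alice', 3)]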
58f3e40c4993a64f71212157fdd73ad82712fa44
7,844
def check_length(min_length: int, max_length: int, mode: str = 'and', *args) -> bool:
    """
    Check that item lengths fall between min_length and max_length.

    :param min_length: minimum length
    :param max_length: maximum length
    :param mode: check mode; 'and': every item must pass the length check,
        'or': at least one item must pass the length check
    :param args: items to check
    :return: True if the check passes, False otherwise
    """
    if mode == 'and':
        for item in args:
            if not (min_length <= len(item) <= max_length):
                return False  # found an incorrect item: stop the check loop and return False
        return True  # no incorrect item found
    else:
        for item in args:
            if min_length <= len(item) <= max_length:
                return True  # found a correct item: stop the check loop and return True
        return False
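A usage sketch with invented values, showing how the two modes differ:

print(check_length(2, 5, 'and', 'abc', 'toolongword'))  # False: one item fails
print(check_length(2, 5, 'or', 'abc', 'toolongword'))   # True: at least one item passes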
f959619a466f62bf6ecfaf10e0fbb316652891e7
7,850
def check_processed_data(df, col_list, na_action):
    """
    Checks that the provided dataframe contains the required columns and no missing values.
    """
    check_passed = True

    # Check columns
    df_cols = df.columns
    if set(df_cols) != set(col_list):
        check_passed = False
        print('Column names mismatched')

    # Check missing values
    n_missing = df.isnull().sum().sum()
    if n_missing > 0:
        print('Data contains {} missing values'.format(n_missing))
        if na_action == 'drop':
            print('Dropping rows with missing values')
        elif na_action == 'ignore':
            print('Keeping missing values as is')
        else:
            print('Not adding this data into master dataframe')
            check_passed = False

    return check_passed
fd26d00eabd9eb1dfd1f8bce01a3fb86582a740f
7,851
def _determine_levels(index):
    """Determine the correct levels argument to groupby."""
    if isinstance(index, (tuple, list)) and len(index) > 1:
        return list(range(len(index)))
    else:
        return 0
2eaed820eb45ff17eb4911fe48670d2e3412fb41
7,855
def format_val(v):
    """Takes in a float and formats it as a percentage string with 1 decimal place.

    :v: float
    :returns: str
    """
    return '{:.1f}%'.format(v)
932ca312a573a7e69aa12e032d7b062ac22f3709
7,858
import re

def you_to_yall(text: str) -> str:
    """Convert all you's to y'all."""
    pattern = r'\b(y)(ou)\b'
    return re.sub(pattern, r"\1'all", text, flags=re.IGNORECASE)
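A one-line usage sketch; the case of the leading "y" is preserved by the capture group:

print(you_to_yall("You know you want it."))  # "Y'all know y'all want it."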
1a3ee7ebf2394f84ad296e18da789dff8ca50a12
7,860
def to_aws_tags(tags):
    """
    When you assign tags to an AWS resource, you have to use the form

        [{"key": "KEY1", "value": "VALUE1"},
         {"key": "KEY2", "value": "VALUE2"},
         ...]

    This function converts a Python-style dict() into these tags.
    """
    return [
        {"key": key, "value": value}
        for key, value in tags.items()
    ]
d1e18ea1e4de08fcae59febbe97ab3eb5bc2a2f1
7,863
def get_game_player(game_state):
    """Get the game player whose turn it is to play."""
    num_players = len(game_state.game_players)
    for game_player in game_state.game_players:
        if game_state.turn_number % num_players == game_player.turn_order:
            return game_player
    return None
a75a0b1b7f1ce4da8c944ad40208fba6f9a50f5a
7,865
import re

def get_headers_pairs_list(filename, verbose=False):
    """
    Read data for clustering from a given file.

    Data format: <email_number> <header>. Clusters are separated with a blank line.

    :param filename: file with input data for clustering.
    :param verbose: whether to be verbose. Default - False.
    :return: list of pairs (email_number, header).
    """
    if verbose:
        print("Reading headers...", end="")

    with open(filename, encoding='utf-8') as inf:
        lines = list(map(lambda x: x.strip(), inf.readlines()))
        lines = list(filter(lambda x: x != "", lines))

        def make_pair(line):
            words = re.split(r"\s+", line)  # raw string avoids an invalid-escape warning
            return (int(words[0]), " ".join(words[1:]))

        if verbose:
            print("Done")

        return list(map(make_pair, lines))
d6b457652e1025475075fb601ba81b6a7fe346fc
7,867
from pathlib import Path

def _run_jlab_string(talktorials_dst_dir):
    """
    Print command for starting JupyterLab from workspace folder.

    Parameters
    ----------
    talktorials_dst_dir : str or pathlib.Path
        Path to directory containing the talktorial folders.
    """
    talktorials_dst_dir = Path(talktorials_dst_dir)

    message = f"""
    To start working with the talktorials in JupyterLab run:

        jupyter lab {talktorials_dst_dir}

    Enjoy!
    """

    return message
76c45f77f419b6a302d63d92e51cd0ea1bb92ebb
7,869
def get_table_type(filename):
    """
    Accepted filenames: device-<device_id>.csv, user.csv,
    session-<time_iso>.csv, trials-<time_iso>-Block_<n>.csv

    :param filename: name of uploaded file.
    :type filename: str
    :return: name of table type.
    :rtype: str|None
    """
    basename, ext = filename.rsplit('.', 1)  # split on the last dot only
    parts = basename.split('-')
    first = parts[0]
    if ext == 'csv' and first in ['device', 'user', 'session', 'trials']:
        return first
    else:
        return None
bc4f39e4c9138168cec44c492b5d1965de711a7e
7,876
def make_download_filename(genome, args, response_format):
    """
    Make a filename that will explain to the user what the predictions are for.

    :param genome: str: which version of the genome we are pulling data from
    :param args: SearchArgs: arguments used for the search
    :param response_format: str: file extension/format
    :return: str: filename that will be returned to the user
    """
    prefix = 'predictions_{}_{}_{}'.format(genome, args.get_model_name(), args.get_gene_list())
    middle = ''
    if not args.is_custom_ranges_list():
        middle = '_{}_{}'.format(args.get_upstream(), args.get_downstream())
    filename = '{}{}.{}'.format(prefix, middle, response_format)
    return filename.replace(' ', '_')
4645e3175de0db81e940d0e64740e937bce7afc3
7,878
def change(csq):
    """
    >>> change("missense|TMEM240|ENST00000378733|protein_coding|-|170P>170L|1470752G>A")
    ('170P', '170L', True)

    >>> change('synonymous|AGRN|ENST00000379370|protein_coding|+|268A|976629C>T')
    ('268A', '268A', False)
    """
    parts = csq.split("|")
    if len(parts) < 7:
        return 0, 0, False
    if ">" not in parts[-2]:
        return (parts[-2], parts[-2], False)
    aa_from, aa_to = parts[-2].split(">")
    # 533PLGQGYPYQYPGPPPCFPPAYQDPGFSYGSGSTGSQQSEGSKSSGSTRSSRRAPGREKERRAAGAGGSGSESDHTAPSGVGSSWRERPAGQLSRGSSPRSQASATAPGLPPPHPTTKAYTVVGGPPGGPPVRELAAVPPELTGSRQSFQKAMGNPCEFFVDIM*>533LWVRATPTSTRDPHPASRLPTRTRALAMAAAAPGVSRVKGAKAVGPPGAAAGPRAVRRSVGRRELGAVAVNRITRHRVGWGAAGESVRPASSAVAAAHAVRPRLPPRGSPRPTPRPRPIQWWGGHPGDPLSGSWLPSPRN*
    return aa_from, aa_to, True
da43c6bd45e641b885469d07c2e6befff334d8a3
7,884
def with_coordinates(pw_in_path, positions_type, atom_symbols, atom_positions):
    """Return a string giving a new input file, which is the same as the one at
    `pw_in_path` except that the ATOMIC_POSITIONS block is replaced by the one
    specified by the other parameters of this function.

    `positions_type`, `atom_symbols`, and `atom_positions` have the same
    meaning as the return values from `final_coordinates_from_scf()`.

    Assumes that there are no whitespace lines in the ATOMIC_POSITIONS block
    (not sure whether this is allowed by QE).
    """
    with open(pw_in_path, 'r') as fp:
        in_lines = fp.readlines()

    out_lines = []
    in_atomic_block = False
    atom_count = 0
    for i, line in enumerate(in_lines):
        if 'ATOMIC_POSITIONS' in line:
            out_lines.append("ATOMIC_POSITIONS {}\n".format(positions_type))
            in_atomic_block = True
        elif in_atomic_block:
            sym, pos = atom_symbols[atom_count], atom_positions[atom_count]
            pos_line = " {} {} {} {}\n".format(sym, str(pos[0]), str(pos[1]), str(pos[2]))
            out_lines.append(pos_line)
            atom_count += 1
            if atom_count == len(atom_symbols):
                in_atomic_block = False
        else:
            out_lines.append(line)

    return ''.join(out_lines)
11dc207a4f17a6521a1aa6299d455f0548c50566
7,885
def calculate_factor_initial_size(n, key):
    """
    Calculate different initial embedding sizes by a chosen factor.

    :param n: number of nodes in the graph
    :param key: the factor - for example if key==10, the sizes will be
        n/10, n/100, ...; the minimum is 100 nodes in the initial embedding
    :return: list of initial embedding sizes
    """
    initial = []
    i = n
    while i > 100:
        i = int(i / key)
        i_2 = int(i / 2)
        key = pow(key, 2)
        if i > 100:
            initial.append(i)
        if i_2 > 100:
            initial.append(i_2)
    initial.append(100)
    ten_per = int(n / 10)
    initial.append(ten_per)
    initial.sort()
    return initial
35fdef82e55647b20f9d86ec926e77c1d5244e2e
7,887
def is_iterable(value):
    """
    Verifies that the value is iterable.

    :param value: value to identify as iterable or not.
    """
    try:
        iter(value)
        return True
    except TypeError:
        return False
13728b7c28506086a3eb9b8faefcdc6276eba00e
7,888
def rw_normalize(A):
    """
    Random walk normalization: computes D^-1 * A.

    Parameters
    ----------
    A: torch.Tensor
        The matrix to normalize.

    Returns
    -------
    A_norm: torch.FloatTensor
        The normalized adjacency matrix.
    """
    degs = A.sum(dim=1)
    degs[degs == 0] = 1  # avoid division by zero for isolated nodes
    return A / degs[:, None]
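A small usage sketch on a made-up adjacency matrix; the isolated last node shows the zero-degree clamp:

import torch

A = torch.tensor([[0., 1., 1.],
                  [1., 0., 0.],
                  [0., 0., 0.]])
print(rw_normalize(A))
# tensor([[0.0000, 0.5000, 0.5000],
#         [1.0000, 0.0000, 0.0000],
#         [0.0000, 0.0000, 0.0000]])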
2a9f90d1f2bf02545e9719b0f373e6eb215fa0cd
7,889
import logging

def _get_areas(params, feature_names):
    """Returns mapping between areas and constituent feature types."""
    areas = {}
    for feature_id in range(len(params)):
        feature_info = params[feature_id]
        feature = feature_info["ID"]
        area = feature_info["Area"]
        if area not in areas:
            areas[area] = [feature]
        else:
            areas[area].append(feature)
        # Update the mapping from ID to name.
        feature_names[feature]["Name"] = feature_info["Name"]
    logging.info("Collected %d areas (%s).", len(areas), ",".join(areas.keys()))
    return areas
937797d7634ac6cd73af941c783f26d5a8028b4d
7,891
def transform_seed_objects(objects):
    """Map seed objects to state format."""
    return {obj['instance_id']: {
        'initial_player_number': obj['player_number'],
        'initial_object_id': obj['object_id'],
        'initial_class_id': obj['class_id'],
        'created': 0,
        'created_x': obj['x'],
        'created_y': obj['y'],
        'destroyed': None,
        'destroyed_by_instance_id': None,
        'destroyed_building_percent': None,
        'deleted': False,
        'destroyed_x': None,
        'destroyed_y': None,
        'building_started': None,
        'building_completed': None,
        'total_idle_time': None
    } for obj in objects}
685c0c5b22fdff108311338354bb42fb53fd07f6
7,894
from pathlib import Path

def getFileAbsolutePath(rel_path):
    """
    Returns the absolute path of a file given its relative path in the project.

    :param rel_path: relative path of the file
    :return: absolute_path
    """
    data_folder = Path("PythoWebScraper/src")
    file_to_open = data_folder / rel_path
    return file_to_open
b39baaa26e3c8a7c7a8c41dbb4fafceb7ca78c70
7,904
def directive_exists(name, line):
    """
    Checks if a directive exists in the line, but is not commented out.

    :param str name: name of directive
    :param str line: line of file
    """
    return line.lstrip().startswith(name)
eb69044de8860b1779ce58ef107859028b8c98cf
7,906
def entrypoint(label):
    """
    If a class is going to be registered with setuptools as an entrypoint, it
    must have the label it will be registered under associated with it via
    this decorator.

    This decorator sets the ENTRY_POINT_ORIG_LABEL and ENTRY_POINT_LABEL class
    properties to the same value, label.

    Examples
    --------

    >>> from dffml import entrypoint, Entrypoint
    >>>
    >>> @entrypoint('mylabel')
    ... class EntrypointSubclassClass(Entrypoint): pass

    In setup.py, EntrypointSubclassClass needs to have this decorator applied
    to it with label set to mylabel.

    .. code-block:: python

        entry_points={
            'dffml.entrypoint': [
                'mylabel = module.path.to:EntrypointSubclassClass',
            ]
        }
    """
    def add_entry_point_label(cls):
        cls.ENTRY_POINT_ORIG_LABEL = label
        cls.ENTRY_POINT_LABEL = label
        return cls

    return add_entry_point_label
68cb3f2d2a982fefd34f412888ae50fcea6a743f
7,913
def hide_empty(value, prefix=', '):
    """Return a string with an optional prefix if value is non-empty."""
    value = str(value)
    return prefix + value if value else ''
d2943dbce763bd054dc26839b2d24232867b3312
7,915
def as_dict_with_keys(obj, keys):
    """
    Convert a SQLAlchemy model to a list of dictionaries with the provided keys.
    """
    return [dict((a, b) for (a, b) in zip(keys, item)) for item in obj]
1fcab95f9f94696c1af652b0b181e6e3e69f7f53
7,922
def sdate_from_datetime(datetime_object, format='%y-%m-%d'):
    """
    Converts a datetime object to an SDATE string.
    """
    return datetime_object.strftime(format)
3f7ee70c47e1971e1c690f564fc8d4df519db5b6
7,929
def arrays_shape(*arrays):
    """Returns the shape of the first array that is not None.

    Parameters
    ----------
    arrays : ndarray
        Arrays.

    Returns
    -------
    tuple of int
        Shape.
    """
    for array in arrays:
        if array is not None:
            shape = array.shape
            return shape
e9e6a4876b938934c843386dffc58f0eccfb20a3
7,932
def cone(toplexes, subcomplex, coneVertex="*"):
    """Construct the cone over a subcomplex. The cone vertex can be renamed if
    desired. The resulting complex is homotopy equivalent to the quotient by
    the subcomplex."""
    return toplexes + [spx + [coneVertex] for spx in subcomplex]
6b1328a2f7c32988666b0c7039efd0ce6ecdffef
7,937
def bipartite_sets(bg):
    """Return the two node sets of a bipartite graph.

    Parameters:
    -----------
    bg: nx.Graph
        Bipartite graph to operate on.
    """
    top = set(n for n, d in bg.nodes(data=True) if d['bipartite'] == 0)
    bottom = set(bg) - top
    return (top, bottom)
618756dbfa87dc0b5d878545fa5395c4a122c84c
7,940
def _merge_block(internal_transactions, transactions, whitelist):
    """
    Merge responses with trace and chain transactions. Remove non-whitelisted fields.

    Parameters
    ----------
    internal_transactions : list
        List of trace transactions
    transactions : list
        List of chain transactions
    whitelist : list
        List of allowed fields

    Returns
    -------
    list
        List of trace transactions extended with whitelisted fields from
        related chain transactions
    """
    transactions_by_id = {
        (transaction["hash"], transaction["blockHash"]): transaction
        for transaction in transactions
    }
    for transaction in internal_transactions:
        tx_hash = transaction["transactionHash"]  # renamed from `hash` to avoid shadowing the builtin
        block = transaction["blockHash"]
        if (tx_hash, block) in transactions_by_id:
            whitelisted_fields = {
                key: value
                for key, value in transactions_by_id[(tx_hash, block)].items()
                if key in whitelist
            }
            transaction.update(whitelisted_fields)
            del transactions_by_id[(tx_hash, block)]
    return internal_transactions
b60c9cde133d97ac84b2899956a15a72c720bbaa
7,945
def remove_virtual_slot_cmd(lpar_id, slot_num):
    """
    Generate HMC command to remove virtual slot.

    :param lpar_id: LPAR id
    :param slot_num: virtual adapter slot number
    :returns: A HMC command to remove the virtual slot.
    """
    return ("chhwres -r virtualio --rsubtype eth -o r -s %(slot)s "
            "--id %(lparid)s" % {'slot': slot_num, 'lparid': lpar_id})
2c6f14949910865f3a60c0016b4587088441572e
7,948
import json

def dict_to_bytestring(dictionary):
    """Converts a python dict to a JSON-formatted bytestring."""
    return json.dumps(dictionary).encode("utf8")
31e201d87e92075d6078450ad019cc51f474f9d4
7,949
def _merge_candidate_name(src, dest):
    """Returns the formatted name of a merge candidate branch."""
    return f"xxx-merge-candidate--{src}--{dest}"
a08b6d4b57385bc390e649448ce264cffd5a1ffa
7,950
def decorator_with_args(decorator_to_enhance):
    """
    A decorator for decorators: it allows any decorator to accept additional
    arguments.
    """
    def decorator_maker(*args, **kwargs):
        def decorator_wrapper(func):
            return decorator_to_enhance(func, *args, **kwargs)
        return decorator_wrapper
    return decorator_maker
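A usage sketch: `repeat` and `greet` below are invented for illustration. The enhanced decorator receives the function first, then the extra arguments:

@decorator_with_args
def repeat(func, times):
    def wrapper(*args, **kwargs):
        return [func(*args, **kwargs) for _ in range(times)]
    return wrapper

@repeat(times=3)
def greet(name):
    return f"hi {name}"

print(greet("ada"))  # ['hi ada', 'hi ada', 'hi ada']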
1af073daf9e9ac9a8834f3a1484bdfa293b1b2bb
7,951
import re

def find_subtype(meta_dict):
    """Find subtype from a dictionary of sequence metadata.

    Args:
        `meta_dict` (dict)
            dictionary of metadata downloaded from genbank

    Returns:
        `subtype` (str)
            RSV subtype as one letter string, 'A' or 'B'.
    """
    subtype = ''
    if ' A' in meta_dict['organism']:
        subtype = 'A'
    elif ' B' in meta_dict['organism']:
        subtype = 'B'
    elif subtype == '':
        for val in meta_dict.values():
            if re.search(r'RSV\s?A\b', val) or \
               re.search(r'type: A\b', val) or \
               re.search(r'group: A\b', val) or re.search(r'\bA\b', val):
                subtype = 'A'
            elif re.search(r'RSV\s?B\b', val) or \
                 re.search(r'type: B\b', val) or \
                 re.search(r'group: B\b', val) or re.search(r'\bB\b', val):
                subtype = 'B'
    return subtype
130b8ed61f117a786cf5f5ae49fc75e8b074ef2e
7,953
from typing import Sequence
from typing import Type
from typing import Union

def unique_fast(
    seq: Sequence, *, ret_type: Type[Union[list, tuple]] = list
) -> Sequence:
    """Fastest order-preserving method for (hashable) uniques in Python >= 3.6.

    Notes
    -----
    Values of seq must be hashable!

    See Also
    --------
    `Uniquify List in Python 3.6 <https://www.peterbe.com/plog/fastest-way-to-uniquify-a-list-in-python-3.6>`_
    """
    return ret_type(dict.fromkeys(seq))
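A short usage sketch; `dict.fromkeys` preserves first-seen order, which is what makes this trick work:

print(unique_fast([3, 1, 3, 2, 1]))                # [3, 1, 2]
print(unique_fast("abracadabra", ret_type=tuple))  # ('a', 'b', 'r', 'c', 'd')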
c41c6b298e52bd3069414206cf9ada4766ea8f4d
7,955
def getHeaders(lst, filterOne, filterTwo):
    """
    Find indexes of desired values.

    Gets a list and finds the index of each value that exists either in
    filterOne or filterTwo.

    Parameters:
        lst (list): Main list which includes the values.
        filterOne (list): A list containing values to find indexes of in the main list.
        filterTwo (list): A list to check by for the same index of a value from
            filterOne if it does not exist in the main list.

    Returns:
        (list): A list containing indexes of values either from filterOne or filterTwo.
    """
    headers = []
    for i in range(len(filterOne)):
        if filterOne[i] in lst:
            headers.append(lst.index(filterOne[i]))
        else:
            headers.append(lst.index(filterTwo[i]))
    return headers
0aa3612b15114b0e0dcdf55eb3643df58157239b
7,966
def top(Q):
    """Inspects the top of the queue."""
    if Q:
        return Q[0]
    raise ValueError('PLF ERROR: empty queue')
16327db2698fbef4cad1da2c9cb34b71158a2a6c
7,972
def move_items_back(garbages):
    """
    Moves the items/garbage backwards according to the speed the background is moving.

    Args:
        garbages (list): A list containing the garbage rects

    Returns:
        garbages (list): A list containing the garbage rects
    """
    for garbage_rect in garbages:  # Loop through the garbage rects
        garbage_rect.centerx -= 2  # Decrease the centerx coordinate by 2
    return garbages
9ffd28c7503d0216b67419009dac6ff432f3b100
7,984
import re

def _mask_pattern(dirty: str):
    """
    Masks out known sensitive data from a string.

    Parameters
    ----------
    dirty : str
        Input that may contain sensitive information.

    Returns
    -------
    str
        Output with any known sensitive information masked out.
    """
    # DB credentials as found in called processes.
    call_proc_db_creds = re.compile(r"--(user|password)=[^', ]+([', ])")
    clean = call_proc_db_creds.sub(r"--\1=*****\2", dirty)
    return clean
b75ae1e6ea128628dd9b11fadb38e4cbcfe59775
7,985
def __remove_duplicate_chars(string_input, string_replace):
    """
    Remove duplicate chars from a string.
    """
    while (string_replace * 2) in string_input:
        string_input = \
            string_input.replace((string_replace * 2), string_replace)
    return string_input
6302bf225fcd5cde822e0640eb7b2c44dcc93fc8
7,989
def format_birthday_for_database(p_birthday_of_contact_month,
                                 p_birthday_of_contact_day,
                                 p_birthday_of_contact_year):
    """Takes an input of a contact's birthday month, day, and year and creates
    a string to insert into the contacts database."""
    formatted_birthday_string = (p_birthday_of_contact_month + "/" +
                                 p_birthday_of_contact_day + "/" +
                                 p_birthday_of_contact_year)
    return formatted_birthday_string
1b4eff67a4073505f9f693f63db6f7c32646bd53
7,992
import math

def normal_distribution(x: float) -> float:
    """
    Standard normal distribution: f(x) = (1/sqrt(2pi))exp(-x^2/2)
    """
    # coefficient
    coefficient = 1 / math.sqrt(2 * math.pi)
    # exponential term
    exponent = math.exp(-(x ** 2) / 2)
    # standard normal density
    return coefficient * exponent
2ee740674ddf2a7b95d507aedf1f5453a75f31f1
7,993
def str_format(s, *args, **kwargs):
    """Return a formatted version of S, using substitutions from args and kwargs.

    (Roughly matches the functionality of str.format but ensures compatibility
    with Python 2.5)
    """
    args = list(args)

    x = 0
    while x < len(s):
        # Skip non-start token characters
        if s[x] != '{':
            x += 1
            continue

        end_pos = s.find('}', x)

        # If the end character can't be found, move to the next character
        if end_pos == -1:
            x += 1
            continue

        name = s[x + 1:end_pos]

        # Ensure the token name is alphanumeric
        if not name.isalnum():
            x += 1
            continue

        # Try to find a value for the token
        value = args.pop(0) if args else kwargs.get(name)
        if value:
            value = str(value)

            # Replace token with value
            s = s[:x] + value + s[end_pos + 1:]

            # Update current position
            x = x + len(value) - 1

        x += 1

    return s
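A usage sketch with invented values; note that positional args are consumed left to right, and keyword lookups fall back on the token name:

print(str_format("Hello {name}, you have {n} messages", n=5, name="Sam"))
# "Hello Sam, you have 5 messages"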
addf11c8390c52da7b0f2886d2298bab66a50e49
7,996
def read_float(field: str) -> float:
    """Read a float, mapping an empty field to NaN."""
    return float(field) if field != "" else float('nan')
ce6f862c9696e3f84ba20b58a0acdce5c1e212b0
8,010
def make_word(parts, sub, i):
    """Replace a syllable in a list of words, and return the joined word."""
    j = 0
    for part in parts:
        for k in range(len(part)):
            if i == j:
                part[k] = sub
                return ' '.join(''.join(p for p in part) for part in parts)
            j += 1
869aa94f60b819b38ad7ddc315b65d862cf525e0
8,017
def reverse_complement(string):
    """Returns the reverse complement strand for a given DNA sequence."""
    return string[::-1].translate(str.maketrans('ATGC', 'TACG'))
bd033b9be51a92fdf111b6c63ef6441c91b638a3
8,022
def _parse_seq(seq):
    """Get a primary sequence and its length (without gaps)."""
    return seq, len(seq.replace('-', ''))
f02c6f316e3bc56d3d77b6ca6363aa1e0a6b4780
8,023
def reverse_digit(x):
    """
    Reverses the digits of an integer.

    Parameters
    ----------
    x : int
        Digit to be reversed.

    Returns
    -------
    rev_x : int
        `x` with its digits reversed.
    """
    # Initialisations
    if x < 0:
        neg = True
    else:
        neg = False
    x = abs(x)
    x_len = len(str(x))  # Number of digits in `x`
    rev_x = 0

    for i in range(x_len):
        digit = x % 10
        x = x // 10
        rev_x += digit * (10 ** (x_len - i - 1))

    if neg == True:
        rev_x = -1 * rev_x
        return rev_x
    else:
        return rev_x
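A quick usage sketch; the sign is preserved and trailing zeros drop out, as the second call shows:

print(reverse_digit(1234))  # 4321
print(reverse_digit(-560))  # -65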
d7358f5778897262b8d57c6dd3e2eb4acb05a2d1
8,025
def hammingDistance(str1, str2):
    """
    Returns the number of `i`th characters in `str1` that don't match the
    `i`th character in `str2`.

    Args
    ---
    `str1 : string`
        The first string
    `str2 : string`
        The second string

    Returns
    ---
    `differences : int`
        The differences between `str1` and `str2`
    """
    # Convert strings to arrays
    a = list(str1)
    b = list(str2)

    # Determine what n equals
    if len(a) < len(b):
        n = len(a)
    else:
        n = len(b)

    # Increment the number of distances for each difference
    differences = 0
    for i in range(n):
        if a[i] != b[i]:
            differences += 1

    return differences
ad35cc79f89171c75a16a13ec6862a2a4abdbd61
8,032
from collections import OrderedDict  # collections, not typing: we instantiate it

def _order_dict(d):
    """Convert a dict to a sorted OrderedDict."""
    # Check OrderedDict first: it is a dict subclass, so testing dict first
    # would re-sort already-ordered inputs.
    if isinstance(d, OrderedDict):
        return d
    elif isinstance(d, dict):
        d = [(k, v) for k, v in d.items()]
        d = sorted(d, key=lambda x: x[0])
        return OrderedDict(d)
    else:
        raise Exception('Must be dict or OrderedDict')
94b3e9e0c5b34e466c913c46683c752ca9560a12
8,035
def clean_toc(toc: str) -> str:
    """Each line in `toc` has 6 unnecessary spaces, so get rid of them."""
    lines = toc.splitlines()
    return "\n".join(line[6:] for line in lines)
40a22200d04c12865e4bffae9cdd3bdc4ea827be
8,042
import pickle

def pickle_load(file_path):
    """
    data = pickle_load(file_path)

    Load data from a pickle dump file.

    inputs:
        file_path: str, path of the pickle file

    output:
        data: python object
    """
    with open(file_path, 'rb') as file_ptr:
        data = pickle.load(file_ptr)
    return data
c9112881facc7a6a135893168fa9abf63309b392
8,043
def versiontuple(v):
    """Return the version as a tuple for easy comparison."""
    return tuple(int(x) for x in v.split("."))
422d49818e2d6d34d0591c1c450d0c92b7fabc82
8,044
import re

def find_checksum(s):
    """Accepts a manifest-style filename and returns the embedded checksum
    string, or None if no such string could be found."""
    m = re.search(r"\b([a-z0-9]{32})\b", s, flags=re.IGNORECASE)
    md5 = m.group(1).lower() if m else None
    return md5
f0d9735833e257a3f133d2d51b01775e35408755
8,045
def get_expts(context, expts):
    """Takes an expts list and returns a space-separated list of expts."""
    return '"' + ' '.join([expt['name'] for expt in expts]) + '"'
76cd216662a6f7a248b70351f79b52e3c47871aa
8,048
def create_filename(last_element):
    """
    For a given file, returns its name without the extension.

    :param last_element: str, file name
    :return: str, file name without extension
    """
    return last_element[:-4]  # strips a dot plus a three-character extension
3faaf647ed6e3c765321efb396b2806e7ef9ddf9
8,050
def simplify_numpy_dtype(dtype):
    """Given a numpy dtype, write out the type as a string.

    Args:
        dtype (numpy.dtype): Type

    Returns:
        (string) name as a simple string
    """
    kind = dtype.kind
    if kind == "b":
        return "boolean"
    elif kind == "i" or kind == "u":
        return "integer"
    elif kind == "f":
        return "float"
    elif kind == "c":
        return "complex"
    elif kind == "m":
        return "timedelta"
    elif kind == "M":
        return "datetime"
    elif kind == "S" or kind == "U":
        return "string"
    else:
        return "python object"
c2370b2a08e58c9614ca43e3f14864782eef4100
8,056
def OpenFace(openface_features, PID, EXP):
    """
    Tidy up OpenFace features in a pandas data frame to be stored in an sqlite
    database:
    - Participant and experiment identifiers are added as columns
    - Underscores in column names are removed, because sqlite does not like
      underscores in column names.
    - Only columns with 'AU' in the column name are considered relevant
      features and kept; the rest is removed.

    Parameters
    ----------
    openface_features : pandas.core.frame.DataFrame
        Data frame with columns participant_id (str), experiment_id (str),
        timestamp (datetime64), and columns for the OpenFace derived video
        features: AU01r (float64), AU02r (float64), AU01c (float64), AU02c (float64)
    PID : str
        Participant identifier
    EXP : str
        Experiment identifier.

    Returns
    -------
    openface_features : pandas.core.frame.DataFrame
        New data frame
    """
    # tidy up data frame:
    filter_col = [col for col in openface_features if col.startswith('AU')]
    filter_col.insert(0, 'time')
    filter_col.insert(0, 'participant_id')
    filter_col.insert(0, 'experiment_id')
    openface_features['participant_id'] = PID
    openface_features['experiment_id'] = EXP
    openface_features = openface_features[filter_col]
    openface_features.columns = openface_features.columns.str.replace('_', '')
    openface_features = openface_features.rename(columns={'experimentid': 'experiment_id'})
    openface_features = openface_features.rename(columns={'participantid': 'participant_id'})
    return openface_features
684e74c159a3551e3fd6d9a80b134c2474759056
8,059
import copy

def deep_merge_dict(base, priority):
    """Recursively merges the two given dicts into a single dict.

    Treating base as the initial point of the resulting merged dict, and
    considering the nested dictionaries as trees, they are merged as follows:

    1. Every path to every leaf in priority would be represented in the result.
    2. Subtrees of base are overwritten if a leaf is found in the
       corresponding path in priority.
    3. The invariant that all priority leaf nodes remain leafs is maintained.

    Parameters
    ----------
    base : dict
        The first, lower-priority, dict to merge.
    priority : dict
        The second, higher-priority, dict to merge.

    Returns
    -------
    dict
        A recursive merge of the two given dicts.

    Example:
    --------
    >>> base = {'a': 1, 'b': 2, 'c': {'d': 4}, 'e': 5}
    >>> priority = {'a': {'g': 7}, 'c': 3, 'e': 5, 'f': 6}
    >>> result = deep_merge_dict(base, priority)
    >>> print(sorted(result.items()))
    [('a', {'g': 7}), ('b', 2), ('c', 3), ('e', 5), ('f', 6)]
    """
    if not isinstance(base, dict) or not isinstance(priority, dict):
        return priority
    result = copy.deepcopy(base)
    for key in priority.keys():
        if key in base:
            result[key] = deep_merge_dict(base[key], priority[key])
        else:
            result[key] = priority[key]
    return result
5c527f45d4ddee00f1e905b09165bcd3551412f6
8,063
def _get_span(s, pattern):
    """Return the span of the first group that matches the pattern."""
    i, j = -1, -1
    match = pattern.match(s)
    if not match:
        return i, j
    for group_name in pattern.groupindex:
        i, j = match.span(group_name)
        if (i, j) != (-1, -1):
            return i, j
    return i, j
8feec723d5a09e70f000c6fcdf58269dd6ea9330
8,065
from pathlib import Path

def guess_format(path):
    """Guess a file format identifier from its suffix. Default to DICOM."""
    path = Path(path)
    if path.is_file():
        suffixes = [x.lower() for x in path.suffixes]
        if suffixes:  # guard: files without an extension fall through to the default
            if suffixes[-1] in ['.h5', '.txt', '.zip']:
                return suffixes[-1][1:]
            if suffixes[-1] == '.nii' or suffixes[-2:] == ['.nii', '.gz']:
                return 'nifti'
    return 'dicom'
70f463ef28adc2c65346ec8b5b87294494a9ee0f
8,066
def get_samples(profileDict):
    """
    Returns only the samples for the metrics (i.e. does not return any
    information from the activity timeline).
    """
    return profileDict["samples"]["metrics"]
06b273e7499e9cb64e38b91374117de6bd3f6c3e
8,069
def dobro(n):
    """
    Double a number.

    :param n: number to be doubled
    :return: result
    """
    n = float(n)
    n += n
    return n
fe17270b7a3373545986568657cf6755dc88638f
8,070
def aoi_from_experiment_to_cairo(aoi):
    """Transform an AOI from experiment coordinates to cairo coordinates."""
    width = round(aoi[1] - aoi[0], 2)
    height = round(aoi[3] - aoi[2], 2)
    return [aoi[0], aoi[2], width, height]
40986aeaf5bb1e5d8295289ce310b4c7cf7f4241
8,071
def palindromic(d):
    """Gets a palindromic number made from a product of two d-digit numbers."""
    # Get the upper limit of d digits, e.g. 3 digits is 999
    a = 10 ** d - 1
    b = a
    # Get the lower limit of d digits, e.g. 3 digits is 100
    limit = 10 ** (d - 1)
    for x in range(a, limit - 1, -1):
        for y in range(b, limit - 1, -1):
            tmp = x * y
            if str(tmp) == str(tmp)[::-1]:
                print(x, y)
                return x * y
    return 0
d4324d6c2de3dff46e14b754627bb551e03e957f
8,073
def markup(pre, string):
    """
    By Adam O'Hern for Mechanical Color

    Returns a formatting string for modo treeview objects.
    Requires a prefix (usually "c" or "f" for colors and fonts respectively),
    followed by a string.

    Colors are done with "\03(c:color)", where "color" is a string
    representing a decimal integer computed with
    0x01000000 | ((r << 16) | (g << 8) | b).

    Italics and bold are done with "\03(c:font)", where "font" is the string
    FONT_DEFAULT, FONT_NORMAL, FONT_BOLD or FONT_ITALIC.

    \03(c:4113) is a special case gray color specifically for treeview text.
    """
    return '\03({}:{})'.format(pre, string)
7ef910aa3b057e82c777b06f73faf554d9c4e269
8,079
from bs4 import BeautifulSoup

def get_person_speech_pair(file_name):
    """
    XML parser to get the person_ids from a given XML file.

    Args:
        file_name (str): file name

    Returns:
        person_id_speech_pair (dict): Dict[person_id(int) -> speech(str)]
    """
    person_id_speech_dict = dict()
    with open(file_name, encoding='utf-8') as file:
        soup = BeautifulSoup(file, 'xml')
        all_speech = soup.find_all('speaker')
        for single_speech in all_speech:
            try:
                # newer format
                person_id = single_speech['personId']
            except KeyError:
                try:
                    # older format
                    person_id = single_speech['person']
                except KeyError:
                    continue
            single_speech_list = []
            for s in single_speech.stripped_strings:
                single_speech_list.append(s)
            processed_speech = ' '.join(single_speech_list)
            # print(parsed_speech, '\n')
            if person_id not in person_id_speech_dict:
                person_id_speech_dict[person_id] = []
            person_id_speech_dict[person_id].append(processed_speech)
    for person_id in person_id_speech_dict:
        person_id_speech_dict[person_id] = ' '.join(person_id_speech_dict[person_id])
    return person_id_speech_dict
0751ed8d46c39027c0503dcd94d8c324c700c1a5
8,084
def _pad_keys_tabular(data, sort):
    """Pad only the key fields in data (i.e. the strs) in a tabular way, such
    that they all take the same amount of characters.

    Args:
        data: list of tuples. The first member of the tuple must be str,
            the rest can be anything.
        sort: if True, sort the data by key before padding.

    Returns:
        list with the strs padded with space chars in order to align in a
        tabular way
    """
    if sort:
        data = sorted(data, key=lambda tup: tup[0])
    sizes = [len(t[0]) for t in data]
    pad = max(sizes) + 2
    data = [(t[0].ljust(pad), *t[1:]) for t in data]
    return data
eba58694354a89e0a6d808c08f964755f3b11822
8,090
import bz2
import base64

def passx_decode(passx):
    """Decode the obfuscated plain-text password; returns the plain-text password."""
    return bz2.decompress(base64.b64decode(passx.encode("ascii"))).decode("ascii")
b8b2138c55dd28734661484a231128e6f3ccbbb7
8,094
def checksum_file(summer, path):
    """
    Calculates the checksum of the file 'path' using the provided hashlib
    digest implementation.

    Returns the hex form of the digest.
    """
    with open(path, "rb") as f:
        # Read the file in 4KB chunks until EOF.
        while True:
            chunk = f.read(4096)
            if not chunk:
                break
            summer.update(chunk)
    return summer.hexdigest()
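A usage sketch with hashlib; the file path is a placeholder, any readable file works:

import hashlib

digest = checksum_file(hashlib.sha256(), "some_file.bin")  # placeholder path
print(digest)  # 64-char hex string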
729b8f895fe74856e83046d0fd5177e584f835f2
8,097
def camel_case(value):
    """Convert an identifier to CamelCase."""
    return "".join(ele.title() for ele in value.split("_"))
e7c74ebe7611eb567f3eae8f16fb01aadd201252
8,098
def format_directory_path(path: str) -> str:
    """Replaces Windows-style path separators with forward slashes and adds
    another slash to the end of the string.
    """
    if path == ".":
        return path
    formatted_path = path.replace('\\', '/')
    if formatted_path[-1] != '/':  # `is not '/'` was an identity check on a literal; use !=
        formatted_path += '/'
    return formatted_path
0daa0cc65e50bd29c76da64d302c0e01c1bb333b
8,101
def get_metagen_search_body(self):
    """
    Get the MetaGenSearchView view body.

    Attributes
    ----------
    measure: str
        the genomic measure label.
    gene: str
        the gene name.

    Returns
    -------
    html: str
        the MetaGenSearchView view body.
    """
    # Get parameters
    measure = self._cw.form["measure"]
    gene = self._cw.form["gene"]

    # Get the HTML body code
    view = self._cw.vreg["views"].select("metagen-search", req=self._cw, rset=None)
    html = view.render(measure=measure, gene=gene, export_type="data", subjects="all")

    return html
9b1e8c6072991765cf566f1bccccb38b178141a4
8,102
def _toCamelCase(string):
    """Convert a snake case string (PyTorch) to camel case (PopART)."""
    words = string.split("_")
    return words[0] + "".join(w.capitalize() for w in words[1:])
f1f21b0313c03b3d63944ee3fcbd5e16b435da6d
8,103
def pulse(x):
    """Return the pulse fn of the input."""
    return 2 * (x % 1 < .5) - 1
f54f73ab6656c0242508170c16ab6ee6a0cc5b92
8,104
def index_singleton_clusters(clusters):
    """Replace cluster labels of -1 with ascending integers larger than the
    maximum cluster index.
    """
    clusters = clusters.copy()
    filt = clusters == -1
    n = clusters.max()
    # One fresh label per -1 entry; the original used len(filt) (the full
    # array length), which mismatches the number of masked positions.
    clusters[filt] = range(n + 1, n + 1 + filt.sum())
    return clusters
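A usage sketch with numpy (input values invented); each -1 gets a fresh label above the current maximum:

import numpy as np

labels = np.array([0, 1, -1, 1, -1])
print(index_singleton_clusters(labels))  # [0 1 2 1 3]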
9cad0df27d2d99ef3a7478f3c3753cd7795beb54
8,105
def scaled_up_roi(roi, scale: int, shape=None):
    """
    Compute ROI for a scaled up image.

    Given a crop region in the original image compute the equivalent crop in
    the upsampled image.

    :param roi: ROI in the original image
    :param scale: integer scale to get scaled up image
    :return: ROI in the scaled up image
    """
    roi = tuple(slice(s.start * scale, s.stop * scale) for s in roi)
    if shape is not None:
        roi = tuple(
            slice(min(dim, s.start), min(dim, s.stop))
            for s, dim in zip(roi, shape)
        )
    return roi
24f160bde7f995861aee3f0c20001ce4093aa58a
8,109
def test_depends(func):
    """Decorator to prevent a test being executed in individual mode."""
    def invalid(self, test):
        if self.test_individual:
            test.description = "Invalid"
            return test.DISABLED("This test cannot be performed individually")
        else:
            return func(self, test)

    invalid.__name__ = func.__name__
    invalid.__doc__ = func.__doc__
    return invalid
4b2db29fc8c0a30ec3a4ec6c3fb93ed958f0094e
8,113
def fix_month(bib_str: str) -> str:
    """Fixes the month string formatting in a bibtex entry."""
    return (
        bib_str.replace("{Jan}", "jan")
        .replace("{jan}", "jan")
        .replace("{Feb}", "feb")
        .replace("{feb}", "feb")
        .replace("{Mar}", "mar")
        .replace("{mar}", "mar")
        .replace("{Apr}", "apr")
        .replace("{apr}", "apr")
        .replace("{May}", "may")
        .replace("{may}", "may")
        .replace("{Jun}", "jun")
        .replace("{jun}", "jun")
        .replace("{Jul}", "jul")
        .replace("{jul}", "jul")
        .replace("{Aug}", "aug")
        .replace("{aug}", "aug")
        .replace("{Sep}", "sep")
        .replace("{sep}", "sep")
        .replace("{Oct}", "oct")
        .replace("{oct}", "oct")
        .replace("{Nov}", "nov")
        .replace("{nov}", "nov")
        .replace("{Dec}", "dec")
        .replace("{dec}", "dec")
    )
9bdcb06dc43a6d6748af20d5279ca38ec6aa1d0a
8,118
def pad(text: str, width: int, align: str = '<', fill: str = ' '):
    """
    Pad the string with `fill` to length of `width`.

    :param text: text to pad
    :param width: expected length
    :param align: left: <, center: ^, right: >
    :param fill: char to fill the padding
    :return:
    """
    assert align in ('<', '^', '>')
    return f"{text:{fill}{align}{width}}"
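A short usage sketch; the f-string builds a format spec like `0>5` or `-^6` from the arguments:

print(pad("7", 5, align='>', fill='0'))   # '00007'
print(pad("ab", 6, align='^', fill='-'))  # '--ab--'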
74befd22927438961b85e370ed16239d7df52707
8,120
import torch

def create_input(shape):
    """Create a random input tensor."""
    return torch.rand(shape).float()
88a907bae19882a4a7c1a0b819eb0deccb065752
8,123
def add_data_to_list(twitter_return):
    """
    Extract the data from the twitter_return dictionary and place it in a list.
    """
    twitter_dict = twitter_return['response_dict']

    # Grab the twitter data
    twitter_data_list = twitter_dict['data']

    return twitter_data_list
9e2a2a5e22926b604856c1ec1ae20ebf765b8610
8,128
from typing import Dict

def get_noise_range_pcts(db_range_exps: dict, length: float) -> Dict[int, float]:
    """Calculates percentages of aggregated exposures to different noise
    levels of total length.

    Note:
        Noise levels exceeding 70 dB are aggregated, as are noise levels
        lower than 50 dB.

    Returns:
        A dictionary containing noise level values with respective percentages.
        (e.g. { 50: 35.00, 60: 65.00 })
    """
    return {
        db_range: round(db_range_length * 100 / length, 3)
        for db_range, db_range_length in db_range_exps.items()
    }
723c7e45a24c149df6f5f19b3f8aabb1f8d5b184
8,130
def calc_n_max_vehicle(n2v_g_vmax, v_max):
    """Calc `n_max3` of Annex 2-2.g from `v_max` (Annex 2-2.i)."""
    return n2v_g_vmax * v_max
457562edce05aebf7d7b870a232b4e0a01df5055
8,133
def get_filepath_wo_ext(file_spec):
    """
    Get file path without extension.

    Parameters
    ----------
    file_spec : DataStruct
        The function uses the attributes output_dir and fname_wo_ext to
        construct the file path.

    Returns
    -------
    out : str
        Constructed file path
    """
    return file_spec.output_dir + '/' + file_spec.fname_wo_ext
6ef9d329292769a3f163678915ff696ef3b8fe1a
8,141
def amplitude(data):
    """
    Calculates the amplitude of a data list.
    """
    n = len(data)
    if n == 0:
        amplitude = None
    else:
        min_value = min(data)
        max_value = max(data)
        amplitude = (max_value + min_value) / 2.0  # midpoint of the extremes
    return amplitude
4dc053287f2de3748961943a8d9d064c9a3d1f87
8,143
def ranks_from_scores(scores):
    """Return the ordering of the scores."""
    return sorted(range(len(scores)), key=lambda i: scores[i], reverse=True)
f49fe306f456d990be5ad8308eae07f186a89da6
8,145
import struct

def unpack_word(data, big_endian=False):
    """
    Unpacks a 32-bit word from binary data.
    """
    # parameter renamed from `str` to avoid shadowing the builtin
    endian = ">" if big_endian else "<"
    return struct.unpack("%sL" % endian, data)[0]
8da8d168b1828062bd44ca3142c8b389bfd634c7
8,146
import ast

def parse_source(source, path=None):
    """Parse python source into an AST."""
    path = "<unknown>" if path is None else path
    return ast.parse(source, filename=path)
7d1188e96b3a72220eca084cf18fc5f7b0b35ef3
8,147
def is_hit(x, y):
    """Return whether given coords hit a circular target of r=1."""
    return x * x + y * y <= 1
4f71afa458ad0a891010e1f5a2be3049b0818c71
8,149
def set_fpn_weights(training_model, inference_model, verbose=False):
    """
    Set feature pyramid network (FPN) weights from training to inference graph.

    Args:
        training_model: MaskRCNN training graph, tf.keras.Model
        inference_model: MaskRCNN inference graph, tf.keras.Model
        verbose: Print layers that get weights, bool

    Returns:
        inference_model
    """
    fpn_layers = ['fpn_c5p5', 'fpn_c4p4', 'fpn_c3p3', 'fpn_c2p2',
                  'fpn_p5', 'fpn_p4', 'fpn_p3', 'fpn_p2']
    for layer_name in fpn_layers:
        # Get weights from training graph
        layer_weights = training_model.get_layer(layer_name).get_weights()
        # Set weights in inference graph
        inference_model.get_layer(layer_name).set_weights(layer_weights)
        if verbose:
            print(f'Set weights: {layer_name}')
    return inference_model
29aadfcd0dcb50edb41938433ff644b1b62f209e
8,150
def navamsa_from_long(longitude):
    """Calculates the navamsa-sign in which the given longitude falls.

    0 = Aries, 1 = Taurus, ..., 11 = Pisces
    """
    one_pada = 360 / (12 * 9)  # There are also 108 navamsas
    one_sign = 12 * one_pada   # = 40 degrees exactly
    signs_elapsed = longitude / one_sign
    fraction_left = signs_elapsed % 1
    return int(fraction_left * 12)
d151f66c0e69541ccdcc3cabc4d0de82e7aa84bd
8,153
def compare_connexion(conn1, conn2):
    """See if two connexions are the same.

    Because the :class:`connexion` could store the two components in different
    orders, or have different instances of the same component object, direct
    comparison may fail. This function explicitly compares both possible
    combinations of serial numbers.

    Parameters
    ----------
    conn1 : :obj:`connexion`
        The first connexion object.
    conn2 : :obj:`connexion`
        The second connexion object.

    Returns
    -------
    :obj:`True` if the connexions are the same, :obj:`False` otherwise.
    """
    sn11 = conn1.comp1.sn
    sn12 = conn1.comp2.sn
    sn21 = conn2.comp1.sn
    sn22 = conn2.comp2.sn

    if (sn11 == sn21 and sn12 == sn22) or (sn11 == sn22 and sn12 == sn21):
        return True
    else:
        return False
25c3737cfcdb0ab6516237ea5423e2237cc94529
8,154
def without_end_slash(url):
    """Makes sure there is no slash at the end of a url."""
    return url.rstrip("/")
19d6b49f7d2a788ea4bb81179e596eb6f019843e
8,155
from stat import S_ISDIR

def list_remote_files(con, directory):
    """
    List the files and folders in a remote directory using an active
    SFTPClient from Paramiko.

    :param con: SFTPClient, an active connection to an SFTP server
    :param directory: string, the directory to search
    :return: (generator, generator), the files and directories as separate generators
    """
    print(directory)
    all_files = [file for file in con.listdir_attr(directory)]
    files = []
    dirs = []
    for file in all_files:
        # ad-hoc add the remote filepath since Paramiko ignores this
        file.path = directory + f'/{file.filename}'
        if S_ISDIR(file.st_mode):
            dirs.append(file)
        else:
            files.append(file)
    files = (file for file in files)
    dirs = (dir for dir in dirs)
    return (files, dirs)
f65e4a5d48f793ff3703ea5e4fc88a0e9b7ea39d
8,159
def distinct_count(daskDf, columnName):
    """Counts the distinct number of values in a Dask dataframe.

    Keyword arguments:
    daskDf -- Dask dataframe
    columnName -- Column name

    Return:
    return -- Distinct number of values
    """
    return daskDf[columnName].drop_duplicates().size
27a03a6eef1f9c949d22f02e5d880ef626b6acf6
8,165
from collections import OrderedDict  # collections, not typing: we instantiate it

def save_chain(config, chain):
    """
    Encode a chain of operation classes as a json-ready dict.

    :param config: dictionary with settings.
    :param chain: OrderedDict of operation class lists.
    :return: string-encodable version of the above.
    """
    di = OrderedDict()
    di['__config__'] = config
    for key, ops in chain.items():
        di[key] = [op.to_json() for op in ops]
    return di
    # return dumps(di, indent=2)
d6838dfe1f079233ba8e3d93c62ddc1ebbcec5e4
8,170
def db_connection_string(dbconf):  # type: (dict) -> str
    """
    Constructs a database connection string from the passed configuration object.
    """
    user = dbconf["user"]
    password = dbconf["password"]
    db_name = "traffic_ops" if dbconf["type"] == "Pg" else dbconf["type"]
    hostname = dbconf["hostname"]
    port = dbconf["port"]
    return "postgresql://{user}:{password}@{hostname}:{port}/{db_name}".format(
        user=user, password=password, hostname=hostname, port=port, db_name=db_name
    )
3fbb52c398f5150f6101b9d0d286f1db1b8aa99f
8,171
def normalize_units(
    df,
    unitsmap,
    targetunit,
    paramcol="parameter",
    rescol="res",
    unitcol="units",
    napolicy="ignore",
):
    """
    Normalize units of measure in a dataframe.

    Parameters
    ----------
    df : pandas.DataFrame
        Dataframe containing results and units of measure data.
    unitsmap : dictionary
        Dictionary where keys are the units present in the df and
        values are the conversion factors required to standardize
        results to a common unit.
    targetunit : string or dict
        The desired final units of measure. Must be present in
        ``unitsmap``. If a string, all rows in df will be assigned
        ``targetunit``. If a dictionary, the units will be mapped from
        the dictionary using the values of ``paramcol`` as keys.
    paramcol, rescol, unitcol : string, optional
        Labels for the parameter, results, and units columns in the df.
    napolicy : string, optional
        Determines how null/missing values are handled. By default,
        this is set to "ignore". Use "raise" to throw a ``ValueError``
        when unit conversion or target units data cannot be found.

    Returns
    -------
    normalized : pandas.DataFrame
        Dataframe with normalized units of measure.
    """
    # determine the preferred units in the wqdata
    target = df[paramcol].map(targetunit)

    # factors to normalize to standard units
    normalization = df[unitcol].map(unitsmap)

    # factor to convert to preferred units
    conversion = target.map(unitsmap)

    if napolicy == "raise":
        msg = ""
        if target.isnull().any():
            nulls = df[target.isnull()][paramcol].unique()
            msg += "Some target units could not be mapped to the {} column ({})\n".format(
                paramcol, nulls
            )

        if normalization.isnull().any():
            nulls = df[normalization.isnull()][unitcol].unique()
            msg += "Some normalization factors could not be mapped to the {} column ({})\n".format(
                unitcol, nulls
            )

        if conversion.isnull().any():
            nulls = target[conversion.isnull()]
            msg += "Some conversion factors could not be mapped to the target units ({})".format(
                nulls
            )

        if len(msg) > 0:
            raise ValueError(msg)

    # convert results
    normalized = df.assign(
        **{rescol: df[rescol] * normalization / conversion, unitcol: target}
    )

    return normalized
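A usage sketch with pandas; the parameter names, units, and factors below are invented, with factors expressed relative to a common base unit (g/L) so that res * normalization / conversion lands in the target unit:

import pandas as pd

df = pd.DataFrame({
    "parameter": ["Lead", "Lead"],
    "res": [5.0, 0.005],
    "units": ["ug/L", "mg/L"],
})
unitsmap = {"ug/L": 1e-6, "mg/L": 1e-3}  # factors to the common base unit
targetunit = {"Lead": "ug/L"}            # desired final units per parameter
print(normalize_units(df, unitsmap, targetunit))
# both rows end up as 5.0 ug/L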
89aa2692ae778eede36d02b8bea756793a55c172
8,172
from typing import List
from typing import Dict
from typing import Any

def format_sents_for_output(sents: List[str], doc_id: str) -> Dict[str, Dict[str, Any]]:
    """
    Transform a list of sentences into a dict of format:

        {
            "sent_id": {"text": "sentence text", "label": []}
        }
    """
    formatted_sents = {}
    for i, sent in enumerate(sents):
        formatted_sents.update({f"{doc_id}_sent_{i}": {"text": sent, "label": []}})
    return formatted_sents
d6178ac48da4d95e8d3727ca9220168e06ba223e
8,177