Columns: content (string, lengths 35 to 416k), sha1 (string, length 40), id (int64, 0 to 710k)
def sequential_colors(n):
    """ Between 3 and 9 sequential colors.

    .. seealso:: `<https://personal.sron.nl/~pault/#sec:sequential>`_
    """
    # https://personal.sron.nl/~pault/
    # as implemented by drmccloy here https://github.com/drammock/colorblind
    assert 3 <= n <= 9
    cols = ['#FFFFE5', '#FFFBD5', '#FFF7BC', '#FEE391', '#FED98E', '#FEC44F',
            '#FB9A29', '#EC7014', '#D95F0E', '#CC4C02', '#993404', '#8C2D04',
            '#662506']
    indices = [[2, 5, 8],
               [1, 3, 6, 9],
               [1, 3, 6, 8, 10],
               [1, 3, 5, 6, 8, 10],
               [1, 3, 5, 6, 7, 9, 10],
               [0, 2, 3, 5, 6, 7, 9, 10],
               [0, 2, 3, 5, 6, 7, 9, 10, 12]]
    return [cols[ix] for ix in indices[n - 3]]
d2ad5f8993f8c7dac99a577b6115a8452ad30024
703,657
def get_king_moves():
    """Generate the king's movement range."""
    return [(1, 0), (1, -1), (-1, 0), (-1, -1),
            (0, 1), (0, -1), (-1, 1), (1, 1)]
5e6f5fcb8c57846b9b2ab112c27f90fc13a6d6b4
703,659
def _unpack_topk(topk, lp, hists, attn=None):
    """unpack the decoder output"""
    beam, _ = topk.size()
    topks = [t for t in topk]
    lps = [l for l in lp]
    k_hists = [(hists[0][:, i, :], hists[1][:, i, :], hists[2][i, :])
               for i in range(beam)]
    if attn is None:
        return topks, lps, k_hists
    else:
        attns = [attn[i] for i in range(beam)]
        return topks, lps, k_hists, attns
7e9012b62f25ec7f4a09387235a0161143ec5029
703,660
def box2start_row_col(box_num):
    """Converts the box number to the corresponding row and column number
    of the square in the upper left corner of the box.

    :param box_num: Int
    :return: len(2) tuple
        [0] start_row_num: Int
        [1] start_col_num: Int
    """
    start_row_num = 3 * (box_num // 3)
    start_col_num = 3 * (box_num % 3)
    return start_row_num, start_col_num
b11e7d4317e1dd32e896f1541b8a0cf05a342487
703,661
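A quick sanity check of the mapping for a standard 9x9 Sudoku grid, assuming row-major box numbering 0-8 (illustrative only, not part of the dataset row):

print(box2start_row_col(0))  # (0, 0) -- top-left box
print(box2start_row_col(4))  # (3, 3) -- centre box
print(box2start_row_col(8))  # (6, 6) -- bottom-right box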
import re

def create_title(chunk, sep, tags):
    """Helper function to allow doconce jupyterbook to automatically assign titles in the TOC.

    If a chunk of text starts with the section specified in sep, lift it up
    to a chapter section. This allows doconce jupyterbook to automatically use
    the section's text as title in the TOC on the left.

    :param str chunk: text string
    :param str sep: chapter|section|subsection
    :param dict tags: tag patterns, e.g. INLINE_TAGS from common.py
    :return: tuple with the chunk stripped of its section header, and title
    :rtype: (str, str)
    """
    title = ''
    m = re.search(tags[sep], chunk, flags=re.MULTILINE)
    if m and m.start() == 0:
        name2s = {'chapter': 9, 'section': 7, 'subsection': 5, 'subsubsection': 3}
        s = name2s[sep]
        header_old = '=' * s
        pattern = r'^ *%s +(.+?) +%s' % (header_old, header_old)
        # Get the title
        mt = re.match(pattern, chunk)
        if mt:
            title = mt.group(1)
            chunk = re.sub(pattern, '', chunk, flags=re.MULTILINE, count=1)
    return chunk, title
67bd80c10733d79f84ba38cd44155e73bb2a2efd
703,662
import uuid

def get_uuid():
    """Returns a uuid4 (random UUID) specified by RFC 4122 as a
    32-character hexadecimal string."""
    return uuid.uuid4().hex
ce3f00569c3fa12aa203246bd5d3ae098f2dab2a
703,664
import re

def hashtag(phrase, plain=False):
    """Generate hashtags from phrases.

    Camelcase the resulting hashtag, strip punct. Allow suppression of style
    changes (plain=True), e.g. for two-letter state codes.
    """
    # Drop empty words up front instead of deleting from the list while
    # iterating over it; the original's trailing `except IndexError: break`
    # silently stopped processing the remaining words after a deletion.
    words = [word for word in phrase.split(' ') if word]
    if not plain:
        for i in range(len(words)):
            words[i] = words[i][0].upper() + words[i][1:]
            words[i] = re.sub(r"['./-]", "", words[i])
    return '#' + ''.join(words)
b6e7ab647330a42cf9b7417469565ce5198edd4f
703,665
def gather_data_to_plot(wells, df):
    """Given a well ID, plot the associated data, pull out treatments.
    Pull in dataframe of all the data.
    """
    data_to_plot = []
    error_to_plot = []
    legend = []
    for well in wells:
        data_to_plot.append(df.loc[well, '600_averaged'])
        error_to_plot.append(df.loc[well, '600_std'])
        legend.append(df.loc[well, 'cell'])
    return data_to_plot, error_to_plot, legend
    #return data_to_plot, legend
1870384b94c2cf3a8a5da84b848586e0a95d1713
703,667
import re

def is_ignored(filename, ignores):
    """Checks if the filename matches any of the ignored keywords.

    :param filename: The filename to check
    :type filename: `str`
    :param ignores: List of regex paths to ignore. Can be None
    :type ignores: `list` of `str` or None
    :rtype: `bool`
    """
    # bool() so the function returns False (not None or an empty list) when
    # `ignores` is empty or None, as the declared return type promises.
    return bool(ignores) and any(re.search(x, filename) for x in ignores)
3d5016d5ac86efdf9f67a251d5d544b06347a3bf
703,668
import hashlib import os def ftp_file_hash(con): """ Get the file hashes inside the FTP server Assumes that the FTP connection is already in the wordpress dir. """ # Function to get MD5 Hash inside the FTP server def get_md5(fpath): """ Returns the md5 hash string of a file """ ftp_md5 = hashlib.md5() with con.open(fpath, 'rb') as f: for chunk in iter(lambda: f.read(4096), b""): ftp_md5.update(chunk) return ftp_md5.hexdigest() # Function to get WordPress hash via FTP def get_hash_dict(): """ Returns the hash dictionary of php files inside the ftp server """ hash_dict = {} for root, dirs, files in con.walk('.'): for x in files: if x.endswith('.php'): path = os.path.normpath(con.path.join(root, x)) hash_dict[path] = {'hash': get_md5(path), 'path': path} return hash_dict # Get Hash Dictionary ftp_hash_dict = get_hash_dict() return ftp_hash_dict
fac1d9ee558a7339135ccf5f1a25191fef2aac4f
703,669
def stop_program():
    """Small function to ask for input and stop if needed."""
    ok = input("Press S to Stop, and any other key to continue...\n")
    if ok in ["S", "s"]:
        return True
    return False
c076d4a443331d64ef5855f0d20f7db2adb0cf11
703,670
def rotate(n):
    """Rotate the binary bit string of n by 180 degrees (i.e. reverse it)
    and convert back to an integer."""
    bits = "{0:b}".format(n)
    return int(bits[::-1], 2)
23fa595ee66c126ad7eae0fa1ca510cf0cebdfbd
703,671
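A quick illustrative check (not part of the dataset row): 6 is 0b110, and reversing the bit string gives 0b011.

print(rotate(6))  # "110" -> "011" -> 3
print(rotate(4))  # "100" -> "001" -> 1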
def flip_thetas(thetas, theta_pairs): """Flip thetas. Parameters ---------- thetas : numpy.ndarray Joints in shape (num_thetas, 3) theta_pairs : list List of theta pairs. Returns ------- numpy.ndarray Flipped thetas with shape (num_thetas, 3) """ thetas_flip = thetas.copy() # reflect horizontally thetas_flip[:, 1] = -1 * thetas_flip[:, 1] thetas_flip[:, 2] = -1 * thetas_flip[:, 2] # change left-right parts for pair in theta_pairs: thetas_flip[pair[0], :], thetas_flip[pair[1], :] = \ thetas_flip[pair[1], :], thetas_flip[pair[0], :].copy() return thetas_flip
e19a274953a94c3fb4768bcd6ede2d7556020ab2
703,672
def surface_analysis_function_for_tests(surface, a=1, c="bar"):
    """This function can be registered for tests."""
    return {'name': 'Test result for test function called for surface {}.'.format(surface),
            'xunit': 'm',
            'yunit': 'm',
            'xlabel': 'x',
            'ylabel': 'y',
            'series': [],
            'alerts': [dict(alert_class='alert-info',
                            message="This is a test for a surface alert.")],
            'comment': f"a is {a} and c is {c}"}
e6e58c172687ce3e0782abb07b9154afae9356cb
703,673
def from_time(year=None, month=None, day=None, hours=None, minutes=None,
              seconds=None, microseconds=None, timezone=None):
    """Convenience wrapper to take a series of date/time elements and return
    a WMI time of the form yyyymmddHHMMSS.mmmmmm+UUU. All elements may be int,
    string or omitted altogether. If omitted, they will be replaced in the
    output string by a series of stars of the appropriate length.

    @param year The year element of the date/time
    @param month The month element of the date/time
    @param day The day element of the date/time
    @param hours The hours element of the date/time
    @param minutes The minutes element of the date/time
    @param seconds The seconds element of the date/time
    @param microseconds The microseconds element of the date/time
    @param timezone The timezone element of the date/time

    @return A WMI datetime string of the form: yyyymmddHHMMSS.mmmmmm+UUU
    """
    def str_or_stars(i, length):
        if i is None:
            return "*" * length
        else:
            return str(i).rjust(length, "0")

    wmi_time = ""
    wmi_time += str_or_stars(year, 4)
    wmi_time += str_or_stars(month, 2)
    wmi_time += str_or_stars(day, 2)
    wmi_time += str_or_stars(hours, 2)
    wmi_time += str_or_stars(minutes, 2)
    wmi_time += str_or_stars(seconds, 2)
    wmi_time += "."
    wmi_time += str_or_stars(microseconds, 6)
    wmi_time += str_or_stars(timezone, 4)
    return wmi_time
61d2bf9fb36225990ac0ac9d575c3931ef66e9f6
703,674
def plural(num, one, many):
    """Convenience function for displaying a numeric value, where the
    attached noun may be both in singular and in plural form."""
    return "%i %s" % (num, one if num == 1 else many)
f29753d25e77bcda2fb62440d8eb19d9bd332d1e
703,675
import math import time def test_query_retry_maxed_out( mini_sentry, relay_with_processing, outcomes_consumer, events_consumer ): """ Assert that a query is not retried an infinite amount of times. This is not specific to processing or store, but here we have the outcomes consumer which we can use to assert that an event has been dropped. """ request_count = 0 outcomes_consumer = outcomes_consumer() events_consumer = events_consumer() @mini_sentry.app.endpoint("get_project_config") def get_project_config(): nonlocal request_count request_count += 1 print("RETRY", request_count) return "no", 500 RETRIES = 1 query_timeout = 0.5 # Initial grace period # Relay's exponential backoff: INITIAL_INTERVAL = 1s; DEFAULT_MULTIPLIER = 1.5; for retry in range(RETRIES): # 1 retry query_timeout += 1 * 1.5 ** (retry + 1) relay = relay_with_processing( {"limits": {"query_timeout": math.ceil(query_timeout)}} ) try: relay.send_event(42) time.sleep(query_timeout) outcomes_consumer.assert_dropped_internal() assert request_count == 1 + RETRIES for (_, error) in mini_sentry.test_failures[:-1]: assert isinstance(error, AssertionError) assert "error fetching project states" in str(error) _, last_error = mini_sentry.test_failures[-1] assert "failed to resolve project information" in str(last_error) finally: mini_sentry.test_failures.clear()
9339078c432cd087dcdbf799cad8d881defb41c2
703,676
def dollo_parsimony(phylo_tree, traitLossSpecies):
    """Simple dollo parsimony implementation.

    Given a set of species that don't have a trait (in our hash), we do a
    bottom up pass through the tree. If two sister species have lost the
    trait, then the ancestor of both also lost it. Otherwise, the ancestor
    has the trait.
    """
    for node in phylo_tree.get_terminals():
        if node.name in traitLossSpecies:
            node.trait = False
        else:
            node.trait = True
    for node in phylo_tree.get_nonterminals(order='postorder'):
        child_traits = [child.trait for child in node]
        node.trait = bool(sum(child_traits))
    return phylo_tree
7929006e1625ba502642fcbd44c0dfff44569777
703,677
def _to_bytes(str_bytes):
    """Takes UTF-8 string or bytes and safely spits out bytes"""
    try:
        bytes = str_bytes.encode('utf8')
    except AttributeError:
        return str_bytes
    return bytes
fd16c24e80bdde7d575e430f146c628c0000bf9a
703,678
import os

def tile_num(fname):
    """Extract tile number from file name."""
    l = os.path.splitext(fname)[0].split('_')  # fname -> list
    i = l.index('tile')                        # i is the index in the list
    return int(l[i+1])
9f86598d5614fc986676491ad8b238960609148a
703,679
def determine_host(environ): """ Extract the current HTTP host from the environment. Return that plus the server_host from config. This is used to help calculate what space we are in when HTTP requests are made. """ server_host = environ['tiddlyweb.config']['server_host'] port = int(server_host['port']) if port == 80 or port == 443: host_url = server_host['host'] else: host_url = '%s:%s' % (server_host['host'], port) http_host = environ.get('HTTP_HOST', host_url) if ':' in http_host: for port in [':80', ':443']: if http_host.endswith(port): http_host = http_host.replace(port, '') break return http_host, host_url
6e91f93d5854600fe4942f093de593e53aaf2aa0
703,680
def read_messages(message_file):
    """(file open for reading) -> list of str

    Read and return the contents of the file as a list of messages, in the
    order in which they appear in the file. Strip the newline from each line.
    """
    # Store the message_file into the lst as a list of messages.
    lst = message_file.readlines()
    # Strip the newline from each string in the list.
    for i in range(len(lst)):
        lst[i] = lst[i].strip()
    return lst
0e4b1a6995a6dd25ab3783b53e730d0dd446747c
703,681
import os

def _ToGypPath(path):
    """Converts a path to the format used by gyp."""
    if os.sep == '\\' and os.altsep == '/':
        return path.replace('\\', '/')
    return path
a2f4864c7a2cc844716ef17fffcd088843f23b3a
703,682
def hello_world1():
    """Flask endpoint

    :return: TXT
    """
    return "Hello World From App 1!"
fed4dcf04234ca8c47885d0702f284c13136f52f
703,683
def cradmin_titletext_for_role(context, role):
    """Template tag implementation of
    :meth:`django_cradmin.crinstance.BaseCrAdminInstance.get_titletext_for_role`.
    """
    request = context['request']
    cradmin_instance = request.cradmin_instance
    return cradmin_instance.get_titletext_for_role(role)
8e6a29c369c5ae407701c12dc541e82dda31f193
703,684
import sys
import traceback

def get_raising_file_and_line(tb=None):
    """Return the file and line number of the statement that raised the tb.

    Returns:
        (filename, lineno) tuple
    """
    if not tb:
        tb = sys.exc_info()[2]
    filename, lineno, _context, _line = traceback.extract_tb(tb)[-1]
    return filename, lineno
f6b0b7878f0a4a322eb4d1ea3f6bdbf1b6ee7530
703,685
def compute_average_oxidation_state(site): """ Calculates the average oxidation state of a site Args: site: Site to compute average oxidation state Returns: Average oxidation state of site. """ try: avg_oxi = sum([sp.oxi_state * occu for sp, occu in site.species_and_occu.items() if sp is not None]) return avg_oxi except AttributeError: pass try: return site.charge except AttributeError: raise ValueError("Ewald summation can only be performed on structures " "that are either oxidation state decorated or have " "site charges.")
8ea8611984f171a84a2bac17c0b49b70c85bfba4
703,687
import math

def cos(x, offset=0, period=1, minn=0, maxx=1):
    """A cosine curve scaled to fit in a 0-1 range and 0-1 domain by default.

    offset: how much to slide the curve across the domain (should be 0-1)
    period: the length of one wave
    minn, maxx: the output range
    """
    value = math.cos((x/period - offset) * math.pi * 2) / 2 + 0.5
    return value*(maxx-minn) + minn
e3119dc71c1b6c6160a29dca37b51b0550479a83
703,688
from os import access, W_OK

def iswritable(pathname):
    """Is file or folder writable?"""
    return access(pathname, W_OK)
705c8ecb9c5d2d3b7aeef6e6e99838ff72ed27f1
703,689
def mean_of_list(list_in):
    """Returns the mean of a list.

    Parameters
    ----------
    list_in : list
        data for analysis

    Returns
    -------
    mean : float
        result of calculation
    """
    mean = sum(list_in) / len(list_in)
    return mean
fac8f40b86e7fa37f96a46b56de722c282ffc79c
703,691
from collections import OrderedDict

def read_markers_gmt(filepath):
    """Read a marker file from a gmt."""
    ct_dict = OrderedDict()
    with open(filepath) as file_gmt:
        for line in file_gmt:
            values = line.strip().split('\t')
            ct_dict[values[0]] = values[2:]
    return ct_dict
a45ed9da13c9ba4110bb4e392338036a32a58e60
703,692
import logging

def get_all_annots(annotations):
    """All annotations"""
    all_annots = set()
    for genome in annotations.keys():
        for annot_name in annotations[genome].keys():
            all_annots.add(annot_name)
    logging.info(' No. of annotation columns: {}'.format(len(all_annots)))
    return sorted(all_annots)
98270d18fbc8ded648a16178087037b1319263d4
703,693
def construct_select_bijlagen_query(bericht_uri): """ Construct a SPARQL query for retrieving all bijlages for a given bericht. :param bericht_uri: URI of the bericht for which we want to retrieve bijlagen. :returns: string containing SPARQL query """ q = """ PREFIX schema: <http://schema.org/> PREFIX nfo: <http://www.semanticdesktop.org/ontologies/2007/03/22/nfo#> PREFIX nie: <http://www.semanticdesktop.org/ontologies/2007/01/19/nie#> PREFIX dct: <http://purl.org/dc/terms/> SELECT DISTINCT ?bijlagenaam ?file ?type WHERE {{ <{0}> a schema:Message; nie:hasPart ?bijlage. ?bijlage a nfo:FileDataObject; nfo:fileName ?bijlagenaam; dct:format ?type. ?file nie:dataSource ?bijlage. }} """.format(bericht_uri) return q
56e9868ddc38c703ac383508cce4e446f0f566a4
703,694
def check_convergence(x):
    """Check for convergence of the sampler."""
    return False
989d94991eeecd414c3f6ff85b2d2bc2801d5cbc
703,695
def add_to_leftmost(branch, val):
    """Adds value to the leftmost part of the branch and returns the modified
    branch and 0, OR returns the unchanged branch and val if the val cannot
    be added."""
    if val == 0:
        return branch, val
    if type(branch) is int:
        return branch + val, 0
    # add to children; these calls do nothing once val has become 0
    left, val = add_to_leftmost(branch[0], val)
    right, val = add_to_leftmost(branch[1], val)
    return [left, right], val
1c2c3bdccfcb6f4966b9bf9228f092ee17ca49f9
703,696
def normalize_list_of_dict_into_dict(alist):
    """Info is generated as a list of dict objects with a single key.

    @alist - the list in question.
    @return - normalized dict with multiple keys
    """
    result = {}
    for element in alist:
        for key in element.keys():
            result[key] = element[key]
    return result
8de00b0923d07b99085ca3b4d694960aae9fc7f5
703,697
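A small illustrative call (not part of the dataset row):

print(normalize_list_of_dict_into_dict([{'a': 1}, {'b': 2}, {'c': 3}]))
# {'a': 1, 'b': 2, 'c': 3}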
import argparse def get_args(): """Get our arguments""" parser = argparse.ArgumentParser() parser.add_argument('filename', metavar='F', type=str, nargs=1, help='File to load') parser.add_argument('-a', '--annealing', action='store_true', default=False, help='Use Simulated Annealing Algorithm?') parser.add_argument('-g', '--genetic', action='store_true', default=False, help='Use Genetic Algorithm?') parser.add_argument('-n', '--numdistricts', type=int, default=None, help=('Number of districts to form. Defaults to the ' 'width of the system')) parser.add_argument('-z', '--animate', action='store_true', default=False, help='Animate algorithms?') parser.add_argument('-p', '--precision', type=int, default=1000, help=('Tweak precision, lower is less. ' 'In a nutshell, how many loops to run.')) parser.add_argument('-r', '--report', action='store_true', default=False, help='Generate all assets for the report') parser.add_argument('-j', '--gif', action='store_true', default=False, help='Generate gif versions of animations?') parser.add_argument('-F', '--full', action='store_true', default=False, help='Generate everything. Report assets, SA, and GA.') args = parser.parse_args() args.filename = args.filename[0] # We only allow 1 file at a time. return args
33e82867f37b1934f9622076459402beb2cb3214
703,699
import logging

def make_error_logger(name, level, filename):
    """Create an error logger.

    :param name: name of the logger
    :param level: logging level
    :param filename: name of the error file
    :return: logger
    """
    formatter = logging.Formatter("%(asctime)s %(levelname)s - %(message)s",
                                  "%d/%m %H:%M:%S")
    sth_err = logging.FileHandler(filename)
    sth_err.setFormatter(formatter)
    logger = logging.getLogger(name)
    logger.addHandler(sth_err)
    logger.setLevel(level)
    return logger
0d78faa4657af348c06755298c2e1d3f717cd092
703,700
from typing import Dict from typing import Any from typing import Iterable from typing import Optional from typing import Tuple def kwargs_from_config( config: Dict[str, Any], required_keys: Iterable[str], optional_keys: Iterable[str], renames: Optional[Iterable[Tuple[str, str]]] = None, ) -> Dict[str, Any]: """ Extract from a dictionary a dictionary with only those keys specified as required_keys or optional_keys. Keys that appear in 'renames' are treated as optional. """ kwargs = {k: config[k] for k in required_keys} kwargs.update({k: config[k] for k in optional_keys if k in config}) if renames is not None: for old_name, new_name in renames: if old_name in config: kwargs[new_name] = config[old_name] return kwargs
b3acef60b87dc8bb4c00157c169d1968c8751100
703,701
import os import os.path as op from glob import glob from warnings import warn def bids_scan_file_walker(dataset=".", include_types=None, warn_no_files=False): """ Traverse a BIDS dataset and provide a generator interface to the imaging files contained within. :author: @chrisfilo https://github.com/preprocessed-connectomes-project/quality-assessment-prot\ ocol/blob/master/scripts/qap_bids_data_sublist_generator.py :param str dataset: path to the BIDS dataset folder. :param list(str) include_types: a list of the scan types (i.e. subfolder names) to include in the results. Can be any combination of "func", "anat", "fmap", "dwi". :param bool warn_no_files: issue a warning if no imaging files are found for a subject or a session. :return: a list containing, for each .nii or .nii.gz file found, the BIDS identifying tokens and their values. If a file doesn't have an identifying token its key will be None. """ def _no_files_warning(folder): if not warn_no_files: return warn("No files of requested type(s) found in scan folder: %s" % folder, RuntimeWarning, stacklevel=1) def _walk_dir_for_prefix(target_dir, prefix): return [x for x in next(os.walk(target_dir))[1] if x.startswith(prefix)] def _tokenize_bids_scan_name(scanfile): scan_basename = op.splitext(op.split(scanfile)[1])[0] # .nii.gz will have .nii leftover scan_basename = scan_basename.replace(".nii", "") file_bits = scan_basename.split('_') # BIDS with non ses-* subfolders given default # "single_session" ses. file_tokens = {'scanfile': scanfile, 'sub': None, 'ses': 'single_session', 'acq': None, 'rec': None, 'run': None, 'task': None, 'modality': file_bits[-1]} for bit in file_bits: for key in file_tokens.keys(): if bit.startswith(key): file_tokens[key] = bit return file_tokens ######### if include_types is None: # include all scan types by default include_types = ['func', 'anat', 'fmap', 'dwi'] subjects = _walk_dir_for_prefix(dataset, 'sub-') if len(subjects) == 0: raise GeneratorExit("No BIDS subjects found to examine.") # for each subject folder, look for scans considering explicitly # defined sessions or the implicit "single_session" case. for subject in subjects: subj_dir = op.join(dataset, subject) sessions = _walk_dir_for_prefix(subj_dir, 'ses-') for scan_type in include_types: # seems easier to consider the case of multi-session vs. # single session separately? if len(sessions) > 0: subject_sessions = [op.join(subject, x) for x in sessions] else: subject_sessions = [subject] for session in subject_sessions: scan_files = glob(op.join( dataset, session, scan_type, '*.nii*')) if len(scan_files) == 0: _no_files_warning(session) for scan_file in scan_files: yield _tokenize_bids_scan_name(scan_file)
f3a4f3e1c96073e89fd69ff2768570c1f0667f9f
703,703
def insert_dim(arg, pos=-1):
    """Insert 1 fake dimension inside the arg before pos'th dimension."""
    shape = [i for i in arg.shape]
    shape.insert(pos, 1)
    return arg.reshape(shape)
921cd27894df9910dbc12b31db6eb1f73d47f180
703,704
def encode_captions(captions): """ Convert all captions' words into indices. Input: - captions: dictionary containing image names and list of corresponding captions Returns: - word_to_idx: dictionary of indices for all words - idx_to_word: list containing all words - vocab_size: number of words """ word_counts = {} for name, caption_list in captions.items(): for caption in caption_list: for word in caption.split(): if word not in word_counts: word_counts[word] = 1 else: word_counts[word] += 1 idx_to_word = ['<START>', '<END>', '<PAD>'] + [w for w in word_counts if w not in ['<START>', '<END>', '<PAD>']] word_to_idx = {} for i in range(len(idx_to_word)): word_to_idx[idx_to_word[i]] = i vocab_size = len(idx_to_word) return word_to_idx, idx_to_word, vocab_size
2ba216c844723b0925b46d0db7bc8afd6ce0f5b4
703,705
def load_stop_words(stop_word_file):
    """Utility function to load stop words from a file and return as a list of words.

    @param stop_word_file Path and file name of a file containing stop words.
    @return list A list of stop words.
    """
    stop_words = []
    for line in open(stop_word_file):
        if line.strip()[0:1] != "#":
            for word in line.split():  # in case more than one per line
                stop_words.append(word)
    return stop_words
8127aeec8db8f7bc87130ea0d1e5faa4998ac86f
703,706
def NullFlagHandler(feature):
    """This handler always returns False"""
    return False
7d37ecc8518144b27b43580b7273adf5f68dfdfb
703,707
import copy def find_paths(orbital_graph, starting_node, ending_node, visited_nodes=None): """Recursively find all the paths from starting_node to ending_node in the graph Paths are returned as a list of paths, where paths are a list of nodes. An empty list means that no valid path exists. """ path = copy.copy(visited_nodes) if visited_nodes else [] if starting_node not in orbital_graph: return [] # We hit an dead end if starting_node in path: return [] # We hit a loop paths = [] path.append(starting_node) for node in orbital_graph[starting_node]: if node == ending_node: # We've found it! path.append(node) paths.append(path) else: paths += find_paths(orbital_graph, node, ending_node, path) return paths
55a47542c3d70bbc1f5c722c1e87908e10b3d0e5
703,708
def find_matching_nodes(search_for, search_in, matches=[]): """ Search Vertex tree 'search_in' for the first isomorphic occurance of the Vertex tree search_for Return a list of [(x,y)...] for node in search_for (x) matched with a pair (y) from search in, such as the two graphs preserve their linking vectors. From this, we might be able to determine the wider context, and therefore provide a suggestion as to how to compose out search_for, in the style of search_in. Return None, if a match cannot be made. If allow_partial is True, then in the event that a complete set of matches cannot be made, return only those nodes for which a twin can be found. matches - is s a list of matches to ignore (allowing the callee to 'mine' for alternatives). """ matches = [] for v1 in search_for: # at each root node in search_for, we start to compose a # temporary tree, to hold our solution temp_tree = None found_match = False if v1 in [x for (x,y) in matches]: # the node v2 has already been matched, so pass continue for v2 in search_in: if v2 in [y for (x,y) in matches]: # the node v2 has already been matched, so pass continue # so we have found a new, unexplored node. Start building a potential solution: solution = [] # fan out the neighbours of both nodes: temp_tree = v2.neighbours to_match = v1.neighbours while len(to_match) > 0: vectors_to_match = [v for (v,n) in to_match] vectors_temp_tree = [v for (v,n) in temp_tree] # we ask; are all the vectors joining the current node of 'search_in' # to its neighbours, to be found at the reciprocal point search_in? if len(list(filter(lambda x: x in vectors_temp_tree, vectors_to_match))) != 0: # Ok, they match so far. Add each of these neighbours to the expanding solution: for a,b in to_match: for x,y in temp_tree: if a == x: solution.append((b,y)) else: temp_tree.remove((x,y)) # now we drill down to the next layer of neighbour nodes on both sides: _temp_tree = [] for (v,n) in temp_tree: _temp_tree = _temp_tree + n.neighbours _to_match = [] for (v,n) in to_match: _to_match = _to_match + n.neighbours to_match = _to_match temp_tree = _temp_tree else: # in this case trees do not match, will not explore this avenue further break # at the point that to_match is empty, we have found a complete, matching tree if to_match == []: found_match = True matches = matches + [(v1,v2)] + solution break if not found_match: # Did not find a match anywhere for v1 and its children return [] # 'happy path' outcome, isomorphic match was located return matches
9e6696533f7b5e313075fadade8b42fe6f09f0cf
703,709
def distance_between_points(p1, p2):
    """Function that computes the euclidean distance between two points.

    Returns:
        float: distance value
    """
    return ((p1['x']-p2['x']) * (p1['x']-p2['x']) +
            (p1['y']-p2['y']) * (p1['y']-p2['y'])) ** 0.5
b8cb563f13f64f0511525e5428d47d9228220915
703,711
def _get(pseudodict, key, single=True):
    """Helper method for getting values from "multi-dict"s"""
    matches = [item[1] for item in pseudodict if item[0] == key]
    if single:
        return matches[0]
    else:
        return matches
f68156535d897dd719b05d675e66cadc284ce1a3
703,712
import glob def datedfile(filename,date): """ select file based on observation date and latest version Parameters ---------- filename: text file name pattern, including "yyyymmdd_vnn" place holder for date and version date: yyyymmdd of observation Returns: file name """ filelist = sorted(glob.glob(filename.replace('yyyymmdd_vnn','????????_v??'))) if len(filelist)==0: return "" dateoffs = filename.find('yyyymmdd') datelist = [file[dateoffs:dateoffs+8] for file in filelist] file = filelist[0] for (f,fdate) in enumerate(datelist): if date < fdate: continue for (v,vdate) in enumerate(datelist[f:]): if vdate > fdate: continue file = filelist[f+v] return file
203cf848e351ef9b8b77bda62d5850b35485762a
703,713
import random
import string

def random_user(n):
    """Generate a random user id of size n."""
    chars = []
    for i in range(n):
        chars.append(random.choice(string.ascii_lowercase))
    return ''.join(chars)
21d8ec2ef8b275ffca481e4553ec396ff4010653
703,714
def _get_parse_input(parse_args, args_in, dict_in): """Return default for parse_input. This is to decide if context_parser should run or not. To make it easy on an API consumer, default behavior is ALWAYS to run parser UNLESS dict_in initializes context and there is no args_in. If dict_in specified, but no args_in: False If dict_in specified, AND args_in too: True If no dict_in specified, but args_in is: True If no dict_in AND no args_in: True If parse_args explicitly set, always honor its value. Args: parse_args (bool): Whether to run context parser. args_in (list[str]): String arguments as passed from the cli. dict_in (dict): Initialize context with this dict. Returns: Boolean. True if should parse input. """ if parse_args is None: return not (args_in is None and dict_in is not None) return parse_args
64dcfd32a3d9f66749a27d4b26bd5fb3a66edf28
703,715
import argparse def get_parser(): """ Creates and returns the argument parser for jExam Returns: ``argparse.ArgumentParser``: the argument parser for jExam """ parser = argparse.ArgumentParser() parser.add_argument("master", type=str, help="Path to exam master notebook") parser.add_argument("result", nargs="?", default="dist", help="Path at which to write output notebooks") parser.add_argument("-f", "--format", type=str, default="otter", help="Name of autograder format; 'otter' or 'ok'") parser.add_argument("-s", "--seed", type=int, default=None, help="Random seed for NumPy to run before execution") parser.add_argument("-q", "--quiet", default=False, action="store_true", help="Run without printing status") return parser
03e433f3b3cdb371dff74489f619f0e65311f5dd
703,716
import os

def find_source_filename(source_name, dir_path):
    """Find the filename matching the source/module name in the specified path.

    For example searching for "queue" might return "queue.py" or "queue.pyc"
    """
    # The second extension was originally written as "pyc" (missing the dot),
    # which made the function look for e.g. "queuepyc" instead of "queue.pyc".
    source_filenames = [
        os.path.join(dir_path, source_name + ext)
        for ext in (".py", ".pyc")]

    for source_filename in source_filenames:
        if os.path.exists(source_filename):
            return source_filename

    return None
0360e57d4071c389d28768946551ad041236e6e3
703,717
def MergeDictsRecursively(original_dict, merging_dict): """ Merges two dictionaries by iterating over both of their keys and returning the merge of each dict contained within both dictionaries. The outer dict is also merged. ATTENTION: The :param(merging_dict) is modified in the process! :param dict original_dict :param dict merging_dict """ items = iter(original_dict.items()) for key, value in items: try: other_value = merging_dict[key] MergeDictsRecursively(value, other_value) del merging_dict[key] except KeyError: continue except TypeError: continue except AttributeError: continue try: original_dict.update(merging_dict) except ValueError: raise TypeError( 'Wrong types passed. Expecting two dictionaries, got: "%s" and "%s"' % (type(original_dict).__name__, type(merging_dict).__name__) ) return original_dict
43174a7f5163a36eb850bc2c4d0f557790920189
703,718
import argparse def parse_args(): """ Parse command line arguments for CLI :return: namespace containing the arguments passed. """ parser = argparse.ArgumentParser() parser.add_argument( '--login', type=str, required=True, help="Full path to file containing JSON with DB login credentials", ) parser.add_argument( '--project_id', type=str, default=None, help="Project ID substring (first part of dataset ID)", ) parser.add_argument( '--microscope', type=str, default=None, help="Substring of microscope column", ) parser.add_argument( '--start_date', type=str, default=None, help="Find >= dates in date_time column", ) parser.add_argument( '--end_date', type=str, default=None, help="Find <= dates in date_time column", ) parser.add_argument( '--description', type=str, default=None, help="Find substring in description column", ) return parser.parse_args()
0982407f808c9af9996bae0a36e8ae252cae0df6
703,720
def _split_hdf5_path(path): """Return the group and dataset of the path.""" # Make sure the path starts with a leading slash. if not path.startswith('/'): raise ValueError(("The HDF5 path '{0:s}' should start with a " "leading slash '/'.").format(path)) if '//' in path: raise ValueError(("There should be no double slash in the HDF5 path " "'{0:s}'.").format(path)) # Handle the special case '/'. if path == '/': return '/', '' # Temporarily remove the leading '/', we'll add it later (otherwise split # and join will mess it up). path = path[1:] # We split the path by slash and we get the head and tail. _split = path.split('/') group_path = '/'.join(_split[:-1]) name = _split[-1] # Make some consistency checks. assert not group_path.endswith('/') assert '/' not in name # Finally, we add the leading slash at the beginning of the group path. return '/' + group_path, name
f0f8bba67254e3616a80c26b58fdcb91db00a49b
703,721
def rebuildEventList(events, eventList=None):
    """Add all events (top and nested) from event trees to a list."""
    if eventList == None:
        eventList = []
    for event in events:
        if event not in eventList:
            eventList.append(event)
        for arg in event.arguments:
            if arg.target.id[0] == "E":
                rebuildEventList([arg.target], eventList)
    return eventList
404ac02e6807214c82d30e465766a4e7af89016b
703,722
def _pack_64b_int_arg(arg):
    """Helper function to pack a 64-bit integer argument."""
    return ((arg >> 56) & 0xff), ((arg >> 48) & 0xff), ((arg >> 40) & 0xff), ((arg >> 32) & 0xff), \
           ((arg >> 24) & 0xff), ((arg >> 16) & 0xff), ((arg >> 8) & 0xff), (arg & 0xff)
274fadb627de9ac47bff34c8e55db545b8e6cf0a
703,723
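An illustrative call showing the most-significant-byte-first ordering (not part of the dataset row):

print(_pack_64b_int_arg(0x0102030405060708))
# (1, 2, 3, 4, 5, 6, 7, 8)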
def list_array_paths(path, array_dict):
    """Given a dictionary containing each directory (experiment folder) as a
    key and a list of array data files (analysis of array containing
    Log2Ratio data) as its value (i.e., the output of 'find_arrays'),
    returns a list of full paths to array files.
    """
    array_path_list = []
    for key_folder in array_dict:
        for array_value in array_dict[key_folder]:
            array_path_list.append(path + key_folder + "/" + array_value)
    return array_path_list
79d5f58a97005fb915de290ae8ccc480fbacd3c0
703,724
def log_simple(n, k):
    """Find how many factors of k divide n, returning the leftover factor
    and the exponent. For example 28 = 2 * 2 * 7, so log_simple(28, 2)
    returns (7, 2) and log_simple(28, 7) returns (4, 1).
    """
    log_result = 0
    while n % k == 0:
        log_result += 1
        n //= k  # integer division keeps n an int (the original used /=)
    return n, log_result
22bda2911aa14a5866759cc0e5d8bf377b372bd7
703,725
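Illustrative calls matching the clarified return value of (leftover factor, exponent); not part of the dataset row:

print(log_simple(28, 2))   # (7, 2)
print(log_simple(40, 2))   # (5, 3)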
def fitparams_for_update(fitparams):
    """Extracts a dictionary of fitparameters from
    modelmiezelb.io.ContrastData.fitparams for updating e.g. a sqe model via
    modelmiezelb.sqe_model.SqE.update_params

    Parameters
    ----------
    fitparams : modelmiezelb.io.ContrastData.fitparam

    Return
    ------
     : dict
        parameters re-packed for update_params
    """
    return {k: v[0] for k, v in fitparams["params"].items()}
b4f2ddf26dbdcb37105da4dcf23602fec19bf4e1
703,726
def spinChainProductSum(spins):
    """Calculate the Ising nearest neighbor interactions of a spin chain,
    periodic boundary condition (PBC).

    Parameters
    ----------
    spins : list of ints or floats
        The given spin under PBC.

    Returns
    -------
    float
        The nearest neighbor interactions (products).
    """
    res = 0.0
    n = len(spins)
    for i in range(n):
        res += spins[i] * spins[i + 1 - n]
    return res
0f115c3284f5680b28d1648140c8618de873e16c
703,728
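A quick illustrative check of the periodic wrap-around (spins[i + 1 - n] is spins[(i + 1) % n] via negative indexing); not part of the dataset row:

print(spinChainProductSum([1, 1, 1, 1]))    # 4.0
print(spinChainProductSum([1, -1, 1, -1]))  # -4.0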
def has_ext_state(section: str, key: str) -> bool:
    """Return whether extended state exists for given section and key.

    Parameters
    ----------
    section : str
        Extended state section.
    key : str
        Extended state key.

    Returns
    -------
    has_ext_state : bool
    """
    has_ext_state = bool(RPR.HasExtState(section, key))  # type:ignore
    return has_ext_state
2483763cbe05f404331d8dfe8a1112fc15b70029
703,729
import re

def _escape_for_regex(text):
    """Escape ``text`` to allow literal matching using egrep"""
    regex = re.escape(text)
    # Seems like double escaping is needed for \
    regex = regex.replace("\\\\", "\\\\\\")
    # Triple-escaping seems to be required for $ signs
    regex = regex.replace(r"\$", r"\\\$")
    # Whereas single quotes should not be escaped
    regex = regex.replace(r"\'", "'")
    return regex
c7d9866dfe4b9c96e500a43d3726db8e7ea73532
703,730
def color_to_256(color):
    """Convert color into ANSI 8-bit color format. Red is converted to 196.

    This converter emits the 216 RGB colors and the 24 grayscale colors.
    It does not use the 16 named colors.
    """
    output = 0
    if color.r == color.g == color.b:
        # grayscale case
        if color.r == 0:  # pure black
            output = 16
        elif color.r == 255:  # pure white
            output = 231
        else:
            output = 232 + int(color.r / 256 * 24)
    else:
        # 216-color RGB
        scale = lambda c: int(c / 256 * 6)
        output = 16
        output += scale(color.b)
        output += scale(color.g) * 6
        output += scale(color.r) * 6 * 6
    return output
3fc747404f393d1adc04de06f59903593979a2a1
703,731
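An illustrative call; the snippet only assumes `color` has `.r`, `.g`, `.b` attributes, so a namedtuple stands in here for whatever color type the original code used:

from collections import namedtuple
Color = namedtuple("Color", "r g b")        # stand-in type, not from the original code
print(color_to_256(Color(255, 0, 0)))       # 196, as the docstring promises
print(color_to_256(Color(128, 128, 128)))   # 244 (mid gray)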
def breadcrumb(*args): """"Render a breadcrumb trail Args: args (list) : list of urls and url name followed by the final name Example: url1, name1, url2, name2, name3 """ def pairs(l): a = iter(l) return list(zip(a,a)) return { 'urls': pairs(args[:-1]), 'page': args[-1] }
dd4fbd6c130da497a1f38c876685dc3f17298efb
703,732
def translate(value, leftMin, leftMax, rightMin, rightMax):
    """Normalize the data into the range rightMin..rightMax.

    :param value: Value to be normalized
    :param leftMin: original min value
    :param leftMax: original max value
    :param rightMin: final min value
    :param rightMax: final max value
    :return: Normalized value
    """
    leftSpan = leftMax - leftMin
    rightSpan = rightMax - rightMin
    valueScaled = float(value - leftMin) / float(leftSpan)
    return rightMin + (valueScaled * rightSpan)
2cc02618edaec4112d30a4f61de9c95d5e8a0f8b
703,734
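Illustrative mapping of a value from a 0-10 scale onto a 0-100 scale (not part of the dataset row):

print(translate(5, 0, 10, 0, 100))   # 50.0
print(translate(7, 0, 10, 100, 0))   # 30.0 (the target range may also be inverted)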
import os

def get_html_theme_path():
    """Return the html theme path for this template library.

    :returns: List of directories to find template files in
    """
    curdir = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
    return [curdir]
f37e9b99cff0b4c87f78dd4fc5a9612908eb97d3
703,735
def get_sqrt_2():
    """Returns an approximation for the square root of 2"""
    return 1.41421356
56c24f16bc27b9b40b771ed545cc553b817f8260
703,736
from typing import Dict
from typing import Optional

def get_gsx_entry_value(entry: Dict[str, Dict[str, str]], field: str) -> Optional[str]:
    """Returns the `entry` value for the given `field`."""
    if not entry or not field:
        return None
    field = f"gsx${field}"
    if field not in entry:
        return None
    return entry[field]["$t"].strip()
788c0a3e99691bfa81386c6fc2b5ea05332c06fd
703,737
from typing import Sequence import re def parse_fvalues(fvalues: Sequence) -> frozenset: """ Parse a sequence of fvalues as a frozenset. This function is mostly used for parsing string provided by the user, splitting them accordingly, but accepts any sequence type. If a string is passed, it will use different delimiters and guarantees that all methods will allow the same delimiters. Delimiters can be white spaces, commas, semicolons, forward slashes, and the " and " substring. @param fvalues: The sequence with the fvalues to be parsed. @return: A frozenset with the fvalues. """ if isinstance(fvalues, str): # We internally convert everything to spaces for delimiter in [" and ", ",", ";", "/"]: fvalues = fvalues.replace(delimiter, " ") fvalues = re.sub(r"\s+", " ", fvalues.strip()) fvalues = fvalues.split() return frozenset(fvalues)
7c6356b5320e6a7056f615bf5a324edbe7c66e47
703,738
def get_resume(text):
    """
    :param text: text to summarize
    :return: the first 500 characters of the text (a commented-out variant
        returned the first paragraph instead)
    """
    # regex = re.compile(r"^(.*?)\n")
    # return regex.match(text).group(1)
    return text[:500]
f247222de19cc131ecdb99400be2a8957cc5ea56
703,739
def compute_q10_correction(q10, T1, T2): """Compute the Q10 temperature coefficient. As explained in [1]_, the time course of voltage clamp recordings are strongly affected by temperature: the rates of activation and inactivation increase with increasing temperature. The :math:`Q_{10}` temperature coefficient, a measure of the increase in rate for a 10 :math:`^{\circ}C` temperature change, is a correction factor used in HH-style models to quantify this temperature dependence. In HH-style models, the adjustment due to temperature can be achieved by decreasing the time constants by a factor :math:`Q_{10}^{(T_2 - T_1)/10}`, where the temperatures :math:`T_1 < T_2`. The temperature unit must be either the Celsius or the Kelvin. Note that :math:`T_1` and :math:`T_2` must have the same unit, and do not need to be exactly 10 degrees apart. Parameters ---------- q10 : :obj:`float` The :math:`Q_{10}` temperature coefficient. T1 : :obj:`float` Temperature at which the first rate is recorded. T2 : :obj:`float` Temperature at which the second rate is recorded. Returns ------- correction : :obj:`float` Correction factor due to temperature. References ---------- .. [1] D. Sterratt, B. Graham, A. Gillies, D. Willshaw, "Principles of Computational Modelling in Neuroscience", Cambridge University Press, 2011. """ # that the test below allows T1 = T2 is intentional; the function should # accomendate for no correction, i.e. a correction factor equal to 1. if T1 > T2: msg = ("T2 must be greater than or equal to T1") raise ValueError(msg) return q10**((T2 - T1) / 10)
eed7d7f38c1f9d98b1a6a89a28eb4f1a6656b6c7
703,740
def without(array, *values): """Creates an array with all occurrences of the passed values removed. Args: array (list): List to filter. values (mixed): Values to remove. Returns: list: Filtered list. Example: >>> without([1, 2, 3, 2, 4, 4], 2, 4) [1, 3] .. versionadded:: 1.0.0 """ return [item for item in array if item not in values]
21bddf5244a591a261f704557fb8017a2401ef77
703,741
def get_total_mnsp_ramp_rate_violation(model):
    """Get total MNSP ramp rate violation"""
    ramp_up = sum(v.value for v in model.V_CV_MNSP_RAMP_UP.values())
    ramp_down = sum(v.value for v in model.V_CV_MNSP_RAMP_DOWN.values())
    return ramp_up + ramp_down
9e326a70966edce51f82036977fcec1b26991c21
703,742
import re def parse_traceroute(raw_result): """ Parse the 'traceroute' command raw output. :param str raw_result: traceroute raw result string. :rtype: dict :return: The parsed result of the traceroute command in a \ dictionary of the form: :: {1: {'time_stamp2': '0.189', 'time_stamp3': '0.141', 'time_stamp1': '0.217', 'hop_num': 1, 'int_hop': '50.1.1.4' }, 2: {'time_stamp2': '0.144', 'time_stamp3': '0.222', 'time_stamp1': '0.216', 'hop_num': 2, 'int_hop': '40.1.1.3' }, 'probe': 3, 'min_ttl': 1, 'dest_addr': '10.1.1.10', 'max_ttl': 30, 'time_out': 3 } """ traceroute_re1 = ( r'(.*\s+(?P<dst_unreachable>!H)\s*?.*)' ) traceroute_re2 = ( r'(\s*(?P<hop_number>\d+)\s+(?P<hop_timeout>(\*\s+)+))' ) traceroute_re3 = ( r'.*\s*(?P<network_unreachable>(Network is unreachable))\s*' ) traceroute_re4 = ( r'\s*traceroute to\s+(?P<dest_addr>(\d+.\d+.\d+.\d+))\s+' ) traceroute_re5 = ( r'.*\s+(?P<min_ttl>\d+)\s+hops min,' r'.*\s+(?P<max_ttl>\d+)\s+hops max,' r'.*\s+(?P<time_out>\d+)\s+sec. timeout,' r'.*\s+(?P<probe>\d+)\s+probes' ) traceroute_re6 = ( r'(\s*(?P<hop_num>\d+)\s+(?P<int_hop>(\d+.\d+.\d+.\d+))\s+' r'(?P<time_stamp1>(\d+.\d+))ms\s+' r'((?P<time_stamp2>(\d+.\d+))ms\s+)?' r'((?P<time_stamp3>(\d+.\d+))ms\s+)?' r'((?P<time_stamp4>(\d+.\d+))ms\s+)?' r'((?P<time_stamp5>(\d+.\d+))ms\s*)?.*)' ) result = {} re_result1 = re.search(traceroute_re1, raw_result) if re_result1: for key, value in re_result1.groupdict().items(): if value is None: result[key] = 'No match found' elif value.isdigit(): result[key] = int(value) else: result[key] = value return result re_result2 = re.search(traceroute_re2, raw_result) if re_result2: for key, value in re_result2.groupdict().items(): if value is None: result[key] = 'No match found' elif value.isdigit(): result[key] = int(value) else: result[key] = value return result re_result3 = re.search(traceroute_re3, raw_result) if re_result3: for key, value in re_result3.groupdict().items(): if value is None: result[key] = 'No match found' elif value.isdigit(): result[key] = int(value) else: result[key] = value return result raw_result_lines = raw_result.splitlines() length = len(raw_result_lines) re_result4 = re.search(traceroute_re4, raw_result) if re_result4: for key, value in re_result4.groupdict().items(): if value is None: result[key] = "No match found" elif value.isdigit(): result[key] = int(value) else: result[key] = value re_result5 = re.search(traceroute_re5, raw_result) if re_result5: for key, value in re_result5.groupdict().items(): if value is None: result[key] = "No match found" elif value.isdigit(): result[key] = int(value) else: result[key] = value for hop_num in range(1, length): result[hop_num] = {} re_result6 = re.search(traceroute_re6, raw_result_lines[hop_num]) if re_result6: for key, value in re_result6.groupdict().items(): if value is None: result[hop_num][key] = "No match found" elif value.isdigit(): result[hop_num][key] = int(value) else: result[hop_num][key] = value return result
2a12d72a4e2e9a64287c65525b7eca6997849f97
703,743
import math

def ecliptic_obliquity_radians(time):
    """Returns ecliptic obliquity radians at time."""
    return math.radians(23.439 - 0.0000004 * time)
384199a506d29cb14b2a42facf2d6c46bf44f111
703,745
def equations_to_matrix() -> list:
    """
    :return: augmented matrix formed from user input (user inputs = linear equations)
    :rtype: list
    """
    n = int(input("input number of rows "))
    m = int(input("input number of columns "))
    A = []
    for row_space in range(n):
        print("input row ", row_space + 1)
        row = input().split()
        # Re-prompt until the row has exactly m entries; the original called
        # equations_to_matrix() recursively here and discarded the result.
        while len(row) != m:
            print("length must be the column size of A")
            row = input().split()
        A.append(list(map(int, row)))  # use map to convert strings to integers
    print(A)
    return A
702c252fed2d7127e4e5f9e5433ca4c29867138c
703,746
def same_origin(origin1, origin2):
    """Return True if these two origins have at least one common ASN."""
    if isinstance(origin1, int):
        if isinstance(origin2, int):
            return origin1 == origin2
        return origin1 in origin2
    if isinstance(origin2, int):
        return origin2 in origin1
    return len(origin1.intersection(origin2)) > 0
1fbc55d9dcfb928c173128a5b386cc6375ec0cde
703,747
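Illustrative calls mixing int and set origins (not part of the dataset row):

print(same_origin(64512, 64512))             # True
print(same_origin(64512, {64512, 64513}))    # True
print(same_origin({64496, 64497}, {64512}))  # False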
def format_outcome_results(outcome_results):
    """Cleans up formatting of outcome_results DataFrame.

    :param outcome_results: outcome_results DataFrame
    :return: Reformatted outcomes DataFrame
    """
    new_col_names = {"links.learning_outcome": "outcome_id"}
    outcome_results = outcome_results.rename(columns=new_col_names)
    outcome_results["outcome_id"] = outcome_results["outcome_id"].astype("int")
    outcome_results = outcome_results.sort_values(
        ["links.user", "outcome_id", "submitted_or_assessed_at"]
    )
    return outcome_results
9c22481725f2782d614b48582edfcd60db284c13
703,748
from win32api import GetSystemMetrics

def _get_max_width():
    """Get information about the total screen width and height."""
    # Total screen width across all connected monitors
    width = GetSystemMetrics(78)
    # Total screen height across all connected monitors
    height = GetSystemMetrics(79)
    return width, height
e2382eab98faecd7d8cf9ba2689897d2512c39db
703,750
from datetime import datetime
import functools
import time

def function_timer(func):
    """This is a timer decorator: if you want a function to be timed, add
    `@function_timer` before its `def` statement and it'll time the function.

    Arguments:
        func {function} -- the function to be timed

    Returns:
        the wrapped function, which prints out the time taken and the time
        the function completed
    """
    @functools.wraps(func)
    def wrapper_timer(*args, **kwargs):
        start_time = time.time()
        # start_date = datetime.now().strftime("%m/%d/%Y, %H:%M:%S")
        # print(f"The function {func.__name__} started at {start_date}")
        value = func(*args, **kwargs)
        elapsed_time = time.time() - start_time
        stop_date = datetime.now().strftime("%m/%d/%Y, %H:%M:%S")
        # The original condition `elapsed_time > 60 <= 3600` chained as
        # `elapsed_time > 60 and 60 <= 3600`, which made the hours branch
        # below unreachable; the intended range check is written out here.
        if 60 < elapsed_time <= 3600:
            print(
                f"The function {func.__name__} took: {round(elapsed_time/60, 3)} minutes at {stop_date}"
            )
        elif elapsed_time > 3600:
            print(
                f"The function {func.__name__} took: {round((elapsed_time/60)/60, 3)} hours at {stop_date}"
            )
        else:
            print(f"The function {func.__name__} took: {round(elapsed_time, 3)} seconds")
        return value
    return wrapper_timer
6ddcca82ae60aafb2c072e62497f8b27d557ccdc
703,752
def breadcrumbs(category):
    """Renders a category tree path using a customizable delimiter.

    Usage::

        {% breadcrumbs <category> %}

    Example::

        {% breadcrumbs category %}
    """
    return {'ancestors': category.get_ancestors()}
3c83a7ad7e8ae30ad297fd9d3d7aa5ffa5631449
703,753
from typing import List

def combine_results_dicts(results_summaries: List[dict]) -> dict:
    """For a list of dictionaries, each with keys 0..n-1, combine into a
    single dictionary with keys 0..ntot-1."""
    combined_summary = {}
    n_overall = 0
    for d in results_summaries:
        n_this = len(d)
        for i in range(n_this):
            combined_summary[i + n_overall] = d.pop(i)
        n_overall += n_this
    return combined_summary
67e5654b3f4b045526bc181ddb9b05eb9f7ce018
703,754
def StringToId(peg_positions):
    """Input a list of strings representing peg positions;
    returns the game bitfield as integer number."""
    my_string = [''] * 36
    cur_pos = 0
    cur_bitfield = 0
    for row in ['A', 'B', 'C', 'D', 'E', 'F']:
        for col in ['1', '2', '3', '4', '5', '6']:
            my_string[cur_pos] = row + col
            cur_pos += 1
    for this_peg in peg_positions:
        cur_bitfield = cur_bitfield | (2 ** my_string.index(this_peg))
    return cur_bitfield
71845dd2a9166bf1e43fc68040de81f93806b322
703,755
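Illustrative calls on the 6x6 'A1'..'F6' board layout the function builds (not part of the dataset row):

print(StringToId(['A1']))        # 1   (bit 0)
print(StringToId(['A2']))        # 2   (bit 1)
print(StringToId(['A1', 'B1']))  # 65  (bits 0 and 6)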
import sys

def notebook_is_active() -> bool:
    """Return if script is executing in a IPython notebook (e.g. Jupyter notebook)"""
    for x in sys.modules:
        if x.lower() == 'ipykernel':
            return True
    return False
200962d831c75d636b310aafa0c8cc4e664e0b4a
703,758
import argparse def build_parser(): """Parser to grab and store command line arguments""" MINIMUM = 200000 SAVEPATH = "data/raw/" parser = argparse.ArgumentParser() parser.add_argument( "subreddit", help="Specify the subreddit to scrape from") parser.add_argument("-m", "--minimum", help="Specify the minimum number of data records to collect. For load data option, this means min number of new records to collect.", type=int, default=MINIMUM) parser.add_argument("-s", "--savepath", help="Save/load folder", type=str, default=SAVEPATH) parser.add_argument("-l", "--load", help="Load existing samples to continue scraping", action="store_true") parser.add_argument("-g", "--gui", help="Call this flag when running from Javascript GUI", action="store_true") return parser
d4f3eb484423416d3cb83ad64784747a8f453d98
703,759
def lzip(*args):
    """This function emulates the python2 behavior of zip (saving parentheses in py3)."""
    return list(zip(*args))
92aa6dea9d4058e68764b24eb63737a2ec59a835
703,760
def sanitize_url(url: str) -> str:
    """This function strips the protocol, e.g., http, from urls.

    This ensures that URLs can be compared, even with different protocols,
    for example, if both http and https are used.
    """
    prefixes = ["https", "http", "ftp"]
    for prefix in prefixes:
        if url.startswith(prefix):
            url = url[len(prefix):]
    return url
9c61a9844cfd6f96e158a9f663357a7a3056abf0
703,761
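Illustrative calls showing why comparisons then work across protocols (not part of the dataset row):

print(sanitize_url("https://example.org/a"))  # "://example.org/a"
print(sanitize_url("http://example.org/a"))   # "://example.org/a"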
def image_to_world(bbox, size):
    """Function generator to create functions for converting from image
    coordinates to world coordinates."""
    # Note: the second element originally read float(size[1]/bbox.height),
    # applying float() after a possible integer division; the cast is now
    # placed to match the first element.
    px_per_unit = (float(size[0]) / bbox.width, float(size[1]) / bbox.height)
    return lambda x, y: (x / px_per_unit[0] + bbox.xmin,
                         (size[1] - y) / px_per_unit[1] + bbox.ymin)
35fcfbf8e76e0ec627da9bf32a797afdae11fe17
703,762
def find_kern_timing(df_trace):
    """Find the kernel start and end for the current stream."""
    kern_begin = 0
    kern_end = 0
    for index, row in df_trace.iterrows():
        if row['api_type'] == 'kern':
            kern_begin = row.start
            kern_end = row.end
            break
    return kern_begin, kern_end
2e121e7a9f7ae19f7f9588b0105f282c59f125ba
703,763
def overlap_branches(targetbranch: dict, sourcebranch: dict) -> dict: """ Overlaps to dictionaries with each other. This method does apply changes to the given dictionary instances. Examples: >>> overlap_branches( ... {"a": 1, "b": {"de": "ep"}}, ... {"b": {"de": {"eper": 2}}} ... ) {'a': 1, 'b': {'de': {'eper': 2}}} >>> overlap_branches( ... {}, ... {"ne": {"st": "ed"}} ... ) {'ne': {'st': 'ed'}} >>> overlap_branches( ... {"ne": {"st": "ed"}}, ... {} ... ) {'ne': {'st': 'ed'}} >>> overlap_branches( ... {"ne": {"st": "ed"}}, ... {"ne": {"st": "ed"}} ... ) {'ne': {'st': 'ed'}} Args: targetbranch(dict): Root where the new branch should be put. sourcebranch(dict): New data to be put into the sourcebranch. """ if not isinstance(sourcebranch, dict): return sourcebranch for key, newItem in sourcebranch.items(): if key not in targetbranch: targetbranch[key] = newItem elif isinstance(targetbranch[key], dict): targetbranch[key] = overlap_branches(targetbranch[key], newItem) else: targetbranch[key] = newItem return targetbranch
a11b54b72d4a7d79d0bfaa13ed6c351dd84ce45f
703,764
def make_legend_labels(dskeys=[], tbkeys=[], sckeys=[], bmkeys=[], plkeys=[], dskey=None, tbkey=None, sckey=None, bmkey=None, plkey=None): """ @param dskeys : all datafile or examiner keys @param tbkeys : all table keys @param sckeys : all subchannel keys @param bmkeys : all beam keys @param plkeys : all polarization keys @param dskey : datafile or examiner key @param tbkey : table key @param sckey : subchannel key @param bmkey : beam key @param plkeys :polarization key """ label = "" if dskey != None and len(dskeys) > 1: label += "ds"+str(dskey+1) if tbkey != None and len(tbkeys) > 1: label += " tb"+str(tbkey+1) if sckey != None and len(sckeys) > 1: label += " sc"+str(sckey+1) if bmkey != None and len(bmkeys) > 1: label += " B"+str(bmkey+1) if plkey != None and len(plkeys) > 1: label += "P"+str(plkey+1) return label
a8b17916f896b7d8526c5ab7ae3cf4a7435627e2
703,765
import collections

def get_interface_config_vlan():
    """Return the interface configuration parameters for all IP static addressing."""
    parameters = collections.OrderedDict()
    parameters['VLAN'] = 'yes'
    return parameters
61ef6affba231af19e4030c54bfcaaaa15a6438f
703,766
def normalize_string(value):
    """Normalize a string value."""
    if isinstance(value, bytes):
        value = value.decode()
    if isinstance(value, str):
        return value.strip()
    raise ValueError("Cannot convert {} to string".format(value))
86d8134f8f83384d83da45ed6cb82841301e2e52
703,767
from distutils.version import StrictVersion from distutils.spawn import find_executable import re import os def get_versions(): """ Try to find out the versions of gcc and ld. If not possible it returns None for it. """ gcc_exe = find_executable('gcc') if gcc_exe: out = os.popen(gcc_exe + ' -dumpversion','r') try: out_string = out.read() finally: out.close() result = re.search('(\d+\.\d+\.\d+)', out_string, re.ASCII) if result: gcc_version = StrictVersion(result.group(1)) else: gcc_version = None else: gcc_version = None # EMX ld has no way of reporting version number, and we use GCC # anyway - so we can link OMF DLLs ld_version = None return (gcc_version, ld_version)
3774f0fe270733512b3a6c1cb3e361a1cb90a362
703,768
def mapAddress(name):
    """Given a register name, return the address of that register.
    Passes integers through unaffected.
    """
    if type(name) == type(''):
        return globals()['RCPOD_REG_' + name.upper()]
    return name
21f2f9a085d259d5fd46b258cc3ee0298fdda158
703,769
def list_index(ls, indices):
    """numpy-style creation of new list based on a list of elements and
    another list of indices.

    Parameters
    ----------
    ls: list
        List of elements
    indices: list
        List of indices

    Returns
    -------
    list
    """
    return [ls[i] for i in indices]
7e5e35674f48208ae3e0befbf05b2a2e608bcdf0
703,770
import io
import re

def copyright_present(f):
    """Check if file already has copyright header.

    Args:
        f - Path to file
    """
    with io.open(f, "r", encoding="utf-8") as fh:
        return re.search('Copyright', fh.read())
afbffde0ab51984dab40d296f8ad9ca29829aef1
703,771
import math def calc_LFC(in_file_2, bin_list): """ Mods the count to L2FC in each bin """ #for itereating through the bin list bin_no=0 header_line = True with open(in_file_2, 'r') as f: for bin_count in f: if header_line: header_line = False else: bin_count = bin_count.strip().split(',') try: FC = bin_list[bin_no][3]/float(bin_count[3]) LFC = math.log(FC, 2) except: LFC = 0 bin_list[bin_no][3] = round(LFC, 2) bin_no+=1 return bin_list
379035fa4972c956d9734b958f3e81a3792c96d6
703,772