Dataset schema:
    content : string (length 39 – 14.9k)
    sha1    : string (length 40)
    id      : int64 (0 – 710k)
def get_list_index(imageList, index):
    """
    This function returns the imageList value at an index

    :param imageList:
    :param index:
    :return:
    """
    return imageList[index]
d27319ee73a250421d7f4a68158dcd796c438ba4
14,823
def count_unique_peaks(peaks):
    """Returns the number of unique peaks."""
    n_unique = 0
    for chrom in peaks.chroms:
        for peak in peaks.fetch(chrom):
            if not peak.iscommon:
                n_unique += 1
    return n_unique
53da151e1e3c95ecd0bf73234878d5d1e63cce33
14,835
def hex_str_to_int(hex_str: str):
    """'#ffffff' -> 0xffffff"""
    if "#" in hex_str:
        return int(hex_str[1:], 16)
    else:
        return int(hex_str, 16)
65fc72fcec909a5062a4881f032d9f1bbe4aeecb
14,840
def xlsw_write_row(ws, row_idx, row, fmt=None):
    """
    ws:
    row_idx: row number
    row: a list, data to write
    fmt: format for cell
    """
    for col_idx in range(len(row)):
        ws.write(row_idx, col_idx, row[col_idx], fmt)
    row_idx += 1
    return row_idx
4d6da85ff95c97a30c05511cb6995e946b7ccb80
14,842
def get_repository_metadata_by_id(app, id):
    """Get repository metadata from the database"""
    sa_session = app.model.session
    return sa_session.query(app.model.RepositoryMetadata).get(app.security.decode_id(id))
ec7d532e3463b29f2e9dfb72cec71212f0c49f85
14,845
import base64


def numpy_to_json(np_arr):
    """Encodes a numpy array to a json-serializable dict"""
    # TFJS only has types float32, int32, bool, string and complex64
    dtype_map = {
        'b': 'bool',
        'i': 'int32',
        'u': 'int32',
        'S': 'string',
        'O': 'string',
        'U': 'string',
        'c': 'complex64',
        'f': 'float32',
    }
    dtype = dtype_map[np_arr.dtype.kind]
    result = {
        'shape': list(np_arr.shape),
        'dtype': dtype,
    }
    if dtype == 'string':
        result['data'] = np_arr.flatten().tolist()
    else:
        # This is faster for JSON to parse and can represent inf/nan values
        result['dataEnc'] = base64.encodebytes(np_arr.astype(dtype).tobytes()).decode()
    return result
426b6c38388008ed2ca02e119ea07169dd39d38e
14,846
import torch


def gram_matrix(input_data):
    """
    Gram Matrix for Style Loss Module

    a = batch size (1)
    b = number of feature maps
    c*d = number of features in a feature map

    Note: This specification is specific to 2d convolution
    """
    a, b, c, d = input_data.size()
    features = input_data.view(b, a * c * d)
    G = torch.mm(features, features.t())
    return G.div(a * b * c * d)
6bf808534a52576a2559789c8d9080363e564a0c
14,850
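A quick shape check for gram_matrix above; the tensor sizes are illustrative, not from the dataset:

import torch

# a feature-map batch as produced by a conv layer: (batch, maps, h, w)
feature_maps = torch.randn(1, 8, 4, 4)
G = gram_matrix(feature_maps)
print(G.shape)  # torch.Size([8, 8]) -- one correlation per pair of feature maps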
def humanize(memory, suffix="B", kilo=1024):
    """
    Scale memory to its proper format
    e.g:
        1253656 => '1.20 MiB'
        1253656678 => '1.17 GiB'
    """
    if kilo == 1000:
        units = ["", "k", "M", "G", "T", "P"]
    elif kilo == 1024:
        units = ["", "Ki", "Mi", "Gi", "Ti", "Pi"]
    else:
        raise ValueError("kilo must be 1000 or 1024!")
    for unit in units:
        if memory < kilo:
            return f"{memory:.2f} {unit}{suffix}"
        memory /= kilo
7afd033e2ead94ca8853a347ad9c8d1fa47a5e64
14,851
def split_decimal(flt):
    """
    split decimal
    params:
        flt : <float> float to convert
    returns:
        <list> [whole number, decimal part]
    """
    whole, dec = str(flt).split('.')
    # dec = int(dec)
    # if dec >= 5:
    #     return int(whole) + 1, dec
    return [int(whole), int(dec)]
4175b3370591028bb96b5bf3eaceee3391c0b1f3
14,852
def team_rm_from_repository_payload(team_default_payload):
    """Provide a team payload for removing a team from a repository."""
    removed_from_repository_payload = team_default_payload
    removed_from_repository_payload["action"] = "removed_from_repository"
    return removed_from_repository_payload
9ffd60f86fa0e4daee0b5c4d73890653e64a47c7
14,856
from pathlib import Path
from typing import List


def get_packages_from_requirements(basedirpath: Path) -> List:
    """Extract packages from requirements.txt as Python list"""
    with open(basedirpath, "r") as f:
        # strip newlines so each entry is a bare requirement string
        lines = [line.strip() for line in f.readlines()]
    return lines
ca0c85e6175f7026781607bce06ed42d6344db1a
14,859
def get_attributes_and_labels(data):
    """
    :param data: The dataset to be divided
    :return: Two pandas frames which are in order of classes and attributes
    """
    # Here we divide our attributes and classes features for a given dataset
    return [data.iloc[:, -1], data.iloc[:, :-1]]
c383dad4720093b4002415b48d793bab5834c3fd
14,860
def _code_snippet(snippet: str) -> str:
    """Generates a markdown code snippet based on python code.

    Args:
        snippet (str): Python code.

    Returns:
        str: Markdown code snippet.
    """
    result = "```python\n"
    result += snippet + "\n"
    result += "```\n\n"
    return result
5c44bccba225dbd6d60f19c712393b291fe065b5
14,861
def remove_conflicts(applicant, potential_editors):
    """
    Remove editors from potential editors who might be sources of conflict
    of interest. These are typically by name or university.
    """
    return [editor for editor in potential_editors
            if not (editor["name"] in applicant["conflicts-faculty"] or
                    editor["universities"] in applicant["conflicts-university"])]
49fe7dc482e7f53114f78cf9ed353eded87ecba4
14,863
def mat_diff(mat_a, mat_b):
    """
    Function that subtracts two matrices: mat_a and mat_b. The subtraction
    can be carried out if the two matrices have same dimension, i.e. same
    number of rows and columns. The elements of the resulting matrix,
    mat_c, are c_ij = a_ij - b_ij

    :param mat_a: list of lists with user defined a_ij elements
    :param mat_b: list of lists with user defined b_ij elements
    :return: mat_c = mat_a - mat_b, list of lists with elements c_ij = a_ij - b_ij
    """
    # check if operation can be done
    if len(mat_a) == len(mat_b) and len(mat_a[0]) == len(mat_b[0]):
        print("The subtraction of the two matrices is:")
    else:
        return "You cannot subtract these matrices! They need to have same dimensions!\n"
    # contain number of rows and columns
    nr_rows = len(mat_a)
    nr_cols = len(mat_a[0])
    # initialize the resulting mat_c
    mat_c = [[0.0 for idx in range(nr_cols)] for jdx in range(nr_rows)]
    # update elements of mat_c: c_ij = a_ij - b_ij
    for row in range(nr_rows):
        mat_c[row] = [(elements[0] - elements[1]) for elements in zip(mat_a[row], mat_b[row])]
    return mat_c
0441ea751a3a6c9fb64e9b3a33420fd4b0f8aa3a
14,868
def compile_column(name: str, data_type: str, nullable: bool) -> str:
    """Create column definition statement."""
    null_str = 'NULL' if nullable else 'NOT NULL'
    return '{name} {data_type} {null},'.format(
        name=name, data_type=data_type, null=null_str)
e1db7bfc78509d394de645b90394f8d796f97a5b
14,878
def dbdisconnect(connection) -> bool:
    """Close connection to SQLite-Database

    :param connection:
    :return: Result of success (true/false)
    """
    if connection:
        connection.close()
        return True
    return False
6c5d17d14e898696885730ef86fad9eab03af02f
14,889
import base64


def diff_kubernetes_secret(secret, desired_fields):
    """Computes a set of changed fields (either added, removed, or modified)
    between the given existing secret and the set of desired fields.

    :param secret: an existing secret as a KubernetesAPIResponse containing
                   encoded secret data
    :type secret: KubernetesAPIResponse
    :param desired_fields: a dict of desired fields
    :type desired_fields: dict[str, str]
    :return: set[str]
    """
    current_keys = set(secret.data.keys())
    desired_keys = set(desired_fields.keys())

    differences = current_keys.symmetric_difference(desired_keys)
    for field in current_keys.intersection(desired_keys):
        decoded_bytes = base64.b64decode(secret.data[field])
        decoded_str = decoded_bytes.decode('utf-8')
        if decoded_str != desired_fields[field]:
            differences.add(field)

    return differences
eb3d219a40bc68ba00d3d876aade936f8b062e75
14,896
def find_indexes_where_lists_differ(list1: list, list2: list) -> list:
    """This function returns the indexes where the two input lists differ.
    The input lists are expected to have same length.

    Args:
        list1 (list): first input list
        list2 (list): second input list

    Returns:
        out_list (list): output list containing the indexes where the two input lists differ

    Raises:
        AssertionError: if the two input lists do not have the same length
    """
    assert len(list1) == len(list2), "The two input lists must have same length"
    out_list = [idx for idx, (first, second) in enumerate(zip(list1, list2)) if first != second]
    return out_list
888d850b692a1920058f53a4617dcd08d5ed31cc
14,897
def values_of(choices):
    """
    Returns a tuple of values from choices options represented as a tuple of
    tuples (value, label). For example:

    .. sourcecode:: python

        >>> values_of((
        ...     ('1', 'One'),
        ...     ('2', 'Two'),))
        ('1', '2')

    :rtype: tuple
    """
    return tuple([value for value, label in choices])
1b152cfab229d2eddd5bf0ff3cbf53954b1934e5
14,901
def objc_provider_framework_name(path):
    """Returns the name of the framework from an `objc` provider path.

    Args:
        path: A path that came from an `objc` provider.

    Returns:
        A string containing the name of the framework (e.g., `Foo` for
        `Foo.framework`).
    """
    return path.rpartition("/")[2].partition(".")[0]
cafb53a64cc8654a992d325faa34721a19d5a38a
14,905
import json


def extract_session_parameters(response_json: str):
    """Extracts session parameters from the json response of a Dialogflow webhook."""
    response = json.loads(response_json)
    return response["session_info"]["parameters"]
45cde22d7271512e4f90fc3078c327c9ccbd86b0
14,913
def scalar_multiply(c, u):
    """
    return the vector u scaled by the scalar c
    """
    return tuple(c * a for a in u)
9ace577e852179893e90544754b8010d3476f3ae
14,914
def grouper_orig(cc):
    """grouper based on gaps in timestamps"""
    diffs = cc.timestamps().diff()
    grpr_orig = (diffs > cc.timedelta).cumsum()
    grpr_orig.name = 'g_orig'
    return grpr_orig
ecc755be0d107a3ac86c35069546ac920c4d42ce
14,917
def make_save_string(file_name):
    """
    Returns a complete save string for saving a model.
    ======================================
    Input:
        file_name (string) - Filename used in save string.
    Output:
        save_string (string) - Save string for specified model.
    """
    # Filepath for saved file
    filepath = r'C:\Developer\electric_motor_thermal_modelling\Models'
    filepath_full = filepath + '\\' + file_name
    return filepath_full
fb00690f4c5a4d7063220918709a8a45daa0a538
14,919
def _get_job_dir(jenkins_directory, job_name):
    """
    Returns the directory for a job configuration file relative
    to the jenkins home directory.
    """
    return jenkins_directory + '/jobs/' + job_name
b77a56efa394b4d43209b7f47da96a4f5f0ca96b
14,924
def check_n_levels(parsed, n):
    """Make sure there are n levels in parsed"""
    if len(parsed) == n:
        return True
    else:
        return False
f4040e4cf53730e61e5c06fed5dbe8f271020c6a
14,926
import re
import glob


def find_files(input_path, framerange=None):
    """
    Discovers files on the filesystem.

    :param input_path: Path to the file sequence
    :param framerange: optional framerange
    :return: array of single file paths
    """
    files = []
    if '%' not in input_path:
        return [input_path]
    if framerange:
        for part_range in framerange.split(','):
            if '-' in part_range:
                first, last = part_range.split('-')
                for i in range(int(first), int(last) + 1):
                    files.append(input_path % i)
            else:
                files.append(input_path % int(part_range))
    else:
        # use a real alternation for the extension; the original
        # [exr|EXR] was a character class matching single letters,
        # not the strings "exr" or "EXR"
        input_path = re.sub(r'(%0[4-8]d)(\.(?:exr|EXR))', r'*\2', input_path)
        files = glob.glob(input_path)
    files = sorted(files)
    return files
8f2818b6c8b72f344c70adee0e79ac52c2313902
14,927
import pathlib


def _parse_requirements(path: pathlib.Path):
    """Read and strip comments from a requirements.txt-like file."""
    lines = [line.strip() for line in path.read_text().splitlines() if line]
    return [line for line in lines if not line.startswith('#')]
af010076d79cca83bfbc6590b833e1bf5f68ddcd
14,928
from typing import Iterable


def detokenize(tokens: Iterable[str], with_treebank: bool = True) -> str:
    """
    Given a list of tokens, join them together into a string.

    with_treebank = True is typically used when rendering utterances, so we
    don't need to deal with things like "andrew's"
    with_treebank = False is typically for rendering express.
    """
    if with_treebank:
        # collapse double spaces left over from joining; the flattened
        # source showed this replace as a no-op, so the double-space
        # first argument here is a reconstruction
        return " ".join(tokens).replace("  ", " ")
    return "".join(tokens)
d6a90c5181a18fe45118b83bdf605c534ceeeac8
14,932
def pair(k1, k2):
    """
    Cantor pairing function
    """
    # integer arithmetic avoids float rounding errors for large inputs
    z = (k1 + k2) * (k1 + k2 + 1) // 2 + k2
    return z
83de1d237049ac8f76ec09f7b23037ab581792d5
14,933
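A usage sketch for the Cantor pair function above; the unpair inverse is an illustrative addition, not part of the dataset:

import math

def unpair(z):
    # invert the Cantor pairing function (illustrative helper)
    w = (math.isqrt(8 * z + 1) - 1) // 2
    t = w * (w + 1) // 2
    k2 = z - t
    k1 = w - k2
    return k1, k2

assert pair(1, 2) == 8
assert unpair(pair(37, 100)) == (37, 100)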
import json


def rpc_error(message='Invalid Request'):
    """
    Generates an rpc error message
    """
    return json.dumps({"result": None, 'error': {'message': message}, 'id': 1})
4aa061e25be938b882cac71cb935f2153ad958cf
14,935
def extract_bits(n, n_bits, offset_from_lsb):
    """Extract a number of bits from an integer.

    Example:
        >>> bin(extract_bits(0b1101011001111010, n_bits=5, offset_from_lsb=7))
        '0b1100'

        0b1101011001111010 -> 0b01100
             ^^^^^<- 7 ->

    The bits marked with ^ will be extracted. The offset is counted from the
    LSB, with the LSB itself having the offset 0.
    """
    try:
        bitmask = (2**n_bits - 1) << offset_from_lsb
    except TypeError as err:
        raise ValueError(err)
    return (n & bitmask) >> offset_from_lsb
08a92d319975915b3b0f453144cc73cba620c5b1
14,943
def get_species_units(input):
    """Retrieve units for GEOS-CF species and derived values"""
    # raw strings keep the LaTeX escapes intact
    if 'NOy/' in input:
        units = 'unitless'
    elif 'pm25' in input:
        units = r'$\mu$g m$^{-3}$'
    elif 'Dust' in input:
        units = r'$\mu$g m$^{-3}$'
    elif 'NOy' in input:
        units = 'ppbv'
    else:
        units = 'v/v'
    return units
5daa3a065afd99cc713e3084c4e694d9fa789ccc
14,950
def get_git_sha_from_dockerurl(docker_url: str, long: bool = False) -> str:
    """
    We encode the sha of the code that built a docker image *in* the docker
    url. This function takes that url as input and outputs the sha.
    """
    parts = docker_url.split("/")
    parts = parts[-1].split("-")
    sha = parts[-1]
    return sha if long else sha[:8]
4bb598da76ada2fdf34fdc3ff24bf046dbeaf9bf
14,951
def get_token(context):
    """
    Retrieves the token the lambda was assigned when it was invoked.

    :param context: AWS Lambda context
    :type context: object
    :return: Lambda token, usually a UUID
    :rtype: str
    """
    # If that fails, fall back to the requestID
    # http://docs.aws.amazon.com/lambda/latest/dg/python-context-object.html
    return context.aws_request_id
83fced7364d6fad8d632edfb0d82efb7bb7cf5de
14,954
def nonlinearitywarning(band, bin_ix, events, verbose=0):
    """
    Flag count rates above the 10% local nonlinearity dropoff, per the
    calibration paper.

    :param band: The band that is being used, either 'FUV' or 'NUV'.
    :type band: str
    :param bin_ix: Array indices designating which events are in the time
        bin of interest.
    :type bin_ix: numpy.ndarray
    :param events: Set of photon events to check if they are in the
        non-linearity regime.
    :type events: dict
    :param verbose: Verbosity level, a value of 0 is minimum verbosity.
    :type verbose: int
    :returns: bool -- Returns True/False whether a given set of events are
        at the non-linearity regime.
    """
    cps_10p_rolloff = {'NUV': 311, 'FUV': 109}
    cps = events['flat_counts'][bin_ix] / events['exptime'][bin_ix]
    return True if cps >= cps_10p_rolloff[band] else False
2a85bf6c6100e39caef169f57b26c72b6d22e257
14,955
def cx2x(cx, e):
    """Transform from *cx* column index value to *x* value, using the
    *e.css('cw')* (column width) as column measure."""
    if cx is None:
        x = 0
    else:
        x = e.parent.pl + cx * (e.css('cw', 0) + e.gw)
    return x
5d04d7f017c0fa54e19c649133139b2e4439562f
14,957
def q(s):
    """ Quote the given string """
    return "'" + str(s) + "'"
0c0a1477e740b430d5e6997c0115ef317457526c
14,958
def _diff_count(string1, string2):
    """
    Count the number of characters by which two strings differ.
    """
    assert isinstance(string1, str)
    assert isinstance(string2, str)

    if string1 == string2:
        return 0

    minlen = min(len(string1), len(string2))
    diffcount = abs(len(string1) - len(string2))
    for ii in range(0, minlen):
        if string1[ii] != string2[ii]:
            diffcount += 1
    return diffcount
82c9ef604db3afee79266e4eb7851164ff43bfd1
14,962
def getInversePermutation(permutation):
    """Invert a given permutation vector.

    :param list | tuple | np.ndarray permutation: Permutation vector to invert.
    :return: Inverted permutation vector.
    :rtype: list
    """
    inverse = [0] * len(permutation)
    for i, p in enumerate(permutation):
        inverse[p] = i
    return inverse
f8fe339f2b0a7fc8180252b99f3fce742f215a13
14,964
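A round-trip check for getInversePermutation above (illustrative):

perm = [2, 0, 3, 1]
inv = getInversePermutation(perm)
print(inv)  # [1, 3, 0, 2]
# composing a permutation with its inverse yields the identity
assert [perm[i] for i in inv] == [0, 1, 2, 3]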
def is_image(content_type):
    """is the given content_type string for an image?

    Args:
        content_type: string containing Content-Type HTTP header value

    Returns:
        Boolean
    """
    return (str(content_type).count("image") > 0)
c69f163e09d8b68e179dd8765ef8401d988ac433
14,967
import base64


def read_encoded(filename: str) -> str:
    """Read a file and return the base64 encoding of its contents."""
    with open(filename, 'rb') as infile:
        return base64.b64encode(infile.read()).decode('utf-8')
4d2c5c8f36b2ebe67a67714f6313a32d4d6bf005
14,969
import re


def strip_irc_colors(data):
    """Strip mirc colors from string. Expects data to be decoded."""
    # raw string so \d reaches the regex engine instead of being an
    # invalid string escape
    return re.sub(r'[\x02\x0F\x16\x1D\x1F]|\x03(\d{1,2}(,\d{1,2})?)?', '', data)
2c4e480cc00e3704a0086522c8bb7409fca0af6e
14,970
def normalised_ellipse_mask(ellipse):
    """Return a normalized copy of the supplied ellipse.

    Here 'normalised' means that the rotation is as close to zero as
    possible.

    Examples:
        >>> normalised_ellipse_mask(
        ...     ((1, 2), (100, 200), 90)
        ... )
        ((1, 2), (200, 100), 0)
    """
    # Don't overwrite the original, we'll return a new ellipse.
    centre, extents, rotation = ellipse
    centre = list(centre[:])
    extents = list(extents[:])

    # Get the rotation as close to zero as possible.
    while rotation > 45:
        extents[0], extents[1] = extents[1], extents[0]
        rotation -= 90
    while rotation < -45:
        extents[0], extents[1] = extents[1], extents[0]
        rotation += 90

    return tuple(centre), tuple(extents), rotation
af5316ace76642667340a986f3d1c2d69934a333
14,971
def probability_of_sum(total: int, dice1, dice2):
    """
    Brief: Basic probability - Dice cast
    Suppose a pair of fair 6-sided dice are thrown. What is the probability
    that the sum of the rolls is 6? (Answer as a simple fraction of integers)

    reference: https://statweb.stanford.edu/~susan/courses/s60/split/node65.html
    """
    n = dice1.shape[0]
    m = dice2.shape[0]
    comb = n * m
    count = 0
    for i in dice1:
        for j in dice2:
            roll_sum = int(i + j)  # avoid shadowing the built-in sum()
            if roll_sum == total:
                count += 1
    prob = count / comb
    print("{:.2%}".format(prob))
    return prob  # return the probability rather than print()'s None
b48e9db4dd40048d24a3dfb481406346076b7132
14,974
def get_source_with_id(result):
    """Return a document's `_source` field with its `_id` added.

    Parameters
    ----------
    result : dict
        A document from a set of Elasticsearch search results.

    Returns
    -------
    dict
        The document's `_source` field updated with the doc's `_id`.
    """
    result['_source'].update({'_id': result['_id']})
    return result['_source']
be9b25ad65a8474aa41d3f927664abdb89a674d5
14,975
def apply_x_frame(response):
    """Include X-Frame-Options header in the HTTP response to protect
    against clickjacking."""
    response.headers["X-Frame-Options"] = "SAMEORIGIN"
    return response
3cc3b572eb64e856512fd8b41191f601bcbcc96c
14,976
def multiply_probs(ln_pa, ln_pb):
    """
    Returns the log probability of multiplied probabilities.

    Pc = Pa * Pb => ln(Pc) = ln(Pa) + ln(Pb)
    """
    return ln_pa + ln_pb
5e541072c9aaf327536196ab4ffc9e3bf488ccf0
14,977
def get_height(image):
    """get_height(image) -> integer height of the image (number of rows).

    Input image must be rectangular list of lists. The height is taken to
    be the number of rows.
    """
    return len(image)
3aa94c4b2458d2a233f32ee10889e52566c04ecb
14,979
def get_query_range(count: int, page: int):
    """Generate query for range of the search results

    :type count: ``int``
    :param count: Max amount of the search results
    :type page: ``int``
    :param page: Current page, depends on count
    :return: A query range
    :rtype: ``str``
    """
    if page < 1:
        raise ValueError('page value can\'t be less than 1')
    if count < 1:
        raise ValueError('max results value can\'t be less than 1')
    return f'from={(page - 1) * count + 1}&to={page * count}'
c29285c56eed77275d2ec28a45de148ce3b7f591
14,987
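Quick examples of the ranges get_query_range above produces (illustrative):

print(get_query_range(count=10, page=1))  # from=1&to=10
print(get_query_range(count=10, page=3))  # from=21&to=30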
def single_file_output(in_file):
    """
    Extract sample name from file ie. /path/to/test.fa --> test

    :param in_file: path/to/file (str)
    :return: extracted sample name (str)
    """
    return in_file.split('/')[-1].split('.')[-2]
4dd5c5f0de584f479fdf91e88cdede33c8072f10
14,993
def VerifyFileID(fi_disk, fi_ours):
    """Verifies that two file IDs are matching.

    Differences in the inode/device are not accepted, but an older
    timestamp for fi_disk is accepted.

    @param fi_disk: tuple (dev, inode, mtime) representing the actual file data
    @param fi_ours: tuple (dev, inode, mtime) representing the last written file data
    @rtype: boolean
    """
    (d1, i1, m1) = fi_disk
    (d2, i2, m2) = fi_ours
    return (d1, i1) == (d2, i2) and m1 <= m2
c32acadd21b249904374eb44bcab3db42fe3972c
14,995
import six


def list_as_string(option):
    """Returns the argument as a string.

    Useful for ensuring that ConfigObj options are always returned as a
    string, despite the presence of a comma in the middle.

    Example:
    >>> print(list_as_string('a string'))
    a string
    >>> print(list_as_string(['a', 'string']))
    a, string
    >>> print(list_as_string('Reno, NV'))
    Reno, NV
    """
    # Check if it's already a string.
    if option is not None and not isinstance(option, six.string_types):
        return ', '.join(option)
    return option
0bd269fb455ce8eb20c4c0838fd4611832686baf
14,999
def rewriter(field, rules):
    """Create a template field function that rewrites the given field
    with the given rewriting rules. ``rules`` must be a list of
    (pattern, replacement) pairs.
    """
    def fieldfunc(item):
        value = item._values_fixed[field]
        for pattern, replacement in rules:
            if pattern.match(value.lower()):
                # Rewrite activated.
                return replacement
        # Not activated; return original value.
        return value
    return fieldfunc
c9cfba499f1dc8683df3918a9f497ec69ad07700
15,002
def similarity(event, places):
    """
    Return a list of dissimilar events to the given event. This is the
    successor function used in the search tree: a node's successors are the
    list of dissimilar options for the next choice.

    event (dict): A potential place/event.
    places (list): All potential places.
    return: List of dissimilar places.
    """
    dissimilar = []
    for place in places:
        similarity = 0
        if place["price"] == event["price"]:
            similarity += 1.
        for type1 in event["types"]:
            for type2 in place["types"]:
                if type1 == type2:
                    similarity += 1.0 / float(len(place["types"]))
        # 1.67 is the empirically generated threshold for similarity
        # The empirical process behind this is described in the paper
        if similarity <= 1.7:
            dissimilar.append(place)
    return dissimilar
ca9a171abe37e7e9d48a604ca92c56092d4fa8c8
15,004
import importlib


def get_varinfo_from_table(discipline, parmcat, parmnum):
    """
    Return the GRIB2 variable information given values of `discipline`,
    `parmcat`, and `parmnum`. NOTE: This function allows for all arguments
    to be converted to a string type if arguments are integer.

    Parameters
    ----------
    **`discipline`**: `int` or `str` of Discipline code value of a GRIB2 message.
    **`parmcat`**: `int` or `str` of Parameter Category value of a GRIB2 message.
    **`parmnum`**: `int` or `str` of Parameter Number value of a GRIB2 message.

    Returns
    -------
    **`list`**: containing variable information. "Unknown" is given for
    item of information if variable is not found.
    - list[0] = full name
    - list[1] = units
    - list[2] = short name (abbreviated name)
    """
    if isinstance(discipline, int):
        discipline = str(discipline)
    if isinstance(parmcat, int):
        parmcat = str(parmcat)
    if isinstance(parmnum, int):
        parmnum = str(parmnum)
    try:
        tblname = 'table_4_2_' + discipline + '_' + parmcat
        modname = '.section4_discipline' + discipline
        # import the table module explicitly; exec'ing a star import does
        # not reliably populate locals() inside a function
        module = importlib.import_module(modname, package=__package__)
        return getattr(module, tblname)[parmnum]
    except (ImportError, KeyError, AttributeError):
        return ['Unknown', 'Unknown', 'Unknown']
d6e3b08dfcbfb023a2725ef1ffbb44448881fa64
15,006
import random


def evolve(pop, mut_rate, mu, lambda_):
    """
    Evolve the population *pop* using the mu + lambda evolutionary strategy

    :param pop: a list of individuals, whose size is mu + lambda.
        The first mu ones are previous parents.
    :param mut_rate: mutation rate
    :return: a new generation of individuals of the same size
    """
    pop = sorted(pop, key=lambda ind: ind.fitness)  # stable sorting
    parents = pop[-mu:]
    # generate lambda new children via mutation
    offspring = []
    for _ in range(lambda_):
        parent = random.choice(parents)
        offspring.append(parent.mutate(mut_rate))
    return parents + offspring
e2510d0ce92d0c5703b9166778f48581db4aca2f
15,007
import re


def strip_comments(source_code):
    """
    Strips comments from source code to increase analysis accuracy.

    The function is similar to strip_quotations. It removes all comments
    from the source_code. It is also used to prevent false positives.
    It uses a regex to match and remove line comments and then block
    comments.

    :param source_code: the string that contains source code
    :return: same source code as source but without any comments
    """
    line_comment_regex = r"//.*\n"
    block_comment_regex = r"/\*(.|[\r\n])*?\*/"
    line_comments_removed = re.sub(line_comment_regex, "\n", source_code)
    all_comments_removed = re.sub(block_comment_regex, "\n", line_comments_removed)
    return all_comments_removed
7e3842940367caaae875ec6c24a1a40fdf7efbc0
15,008
def make_ratings_hash(ratings):
    """
    Make a hashtable of ratings indexed by itemId and pointing to
    the vector (genres, decade) that fully characterize an item.
    """
    rhash = {}
    # For every rating, check if the relevant item is already in the map.
    # If not, add it to the map. Key is item_id, mapped value is the vector
    # consisting of the possible genres and the decade of the movie.
    for row_indx, itemid in ratings['itemid'].items():  # .iteritems() is Python 2 / old pandas
        if itemid not in rhash:
            itemData = ratings.loc[row_indx, 'Action':'decade']  # .ix was removed from pandas
            rhash[itemid] = itemData
    return rhash
e105f08dbdbec544dccdcd299aaf189ef64691b7
15,012
def get_rules(rules_module):
    """
    Get rule functions from module

    :param rules_module: module with rules implementations
    :return: rule functions
    """
    rules = []
    for item in dir(rules_module):
        if item.startswith("rule"):
            rules.append(getattr(rules_module, item))
    return rules
41b9428f0893a153700a19f4237453af8910a759
15,014
import requests


def _download_chunk(args):
    """Download a single chunk.

    :param args: Tuple consisting of (url, start, finish) with start and
        finish being byte offsets.
    :return: Tuple of chunk id and chunk data
    """
    idx, args = args
    url, start, finish = args
    range_string = '{}-'.format(start)

    if finish is not None:
        range_string += str(finish)

    # Actual HTTP get download request
    response = requests.get(url, headers={'Range': 'bytes=' + range_string})
    return idx, response.content
eb88e1200fff8d336908d2247e12c46576bb4422
15,017
import torch


def discounted_cumsum(rewards, discount):
    """Calculates the cumulative sum of discounted rewards

    Arguments:
        rewards {torch.Tensor} -- rewards
        discount {float} -- discount factor

    Returns:
        [type] -- cumulative sum of discounted rewards
    """
    # powers of the discount factor: [discount^0, discount^1, ...]
    discounts = discount ** torch.arange(rewards.shape[0], dtype=rewards.dtype)
    # reverse cumulative sum so entry t sums over rewards from t onward,
    # then divide out the global scaling; note Tensor.flip needs explicit
    # dims, and the original in-place ** and bare flip() did not run as
    # written, so this body is a reconstruction of the stated intent
    disc_cumsum = torch.cumsum((discounts * rewards).flip(0), 0).flip(0) / discounts
    return disc_cumsum
b0e2cd45c4cb6882a84784c64a20ae64995f26a6
15,019
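A sanity check for the corrected discounted_cumsum above, against hand-computed rewards-to-go (illustrative):

import torch

rewards = torch.tensor([1.0, 0.0, 2.0])
gamma = 0.9
expected = torch.tensor([
    1.0 + gamma * 0.0 + gamma**2 * 2.0,  # discounted future rewards at t=0
    0.0 + gamma * 2.0,                   # at t=1
    2.0,                                 # at t=2
])
assert torch.allclose(discounted_cumsum(rewards, gamma), expected)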
def _StripPC(addr, cpu_arch):
    """Strips the Thumb bit from a program counter address when appropriate.

    Args:
        addr: the program counter address
        cpu_arch: Target CPU architecture.

    Returns:
        The stripped program counter address.
    """
    if cpu_arch == "arm":
        return addr & ~1
    return addr
9127f10cbbeb71814f6c9f30e7446d5f8233bb67
15,021
def domains_to_xyt(xyt, domains):
    """
    This function takes in xyt data and a list of domains and converts the
    list of domains into a list of xyt points within those domains. This
    list can then be used to graph the domains onto the entire worm path,
    for visualization.

    :param xyt: A list of xyt points.
    :param domains: A list of domains, which are themselves a list of two
        values, representing time indices that frame a period of zero
        movement in the worm path.
    :return: Three lists, each one representing values of x, y, and t
        within the given input domains. These can be zipped together to get
        a list of xyt points within the domains.
    """
    x, y, t = zip(*xyt)
    domains_x = []
    domains_y = []
    domains_t = []
    for domain in domains:
        left = domain[0]
        right = domain[1]
        domains_x.extend(x[left:right])
        domains_y.extend(y[left:right])
        domains_t.extend(t[left:right])
    return domains_x, domains_y, domains_t
56bb2614d3f612913b19c550ed042e6a635ae4fa
15,028
def replicaset_status(client, module):
    """
    Return the replicaset status document from MongoDB
    # https://docs.mongodb.com/manual/reference/command/replSetGetStatus/
    """
    rs = client.admin.command('replSetGetStatus')
    return rs
eb46cd1e28ecb1f2c6c5a222d176438677bd8e8c
15,029
from textwrap import dedent


def minimal_html_page(
    body: str, css: str = "", title: str = "Standalone HTML", lang: str = "en"
):
    """Return a template for a minimal HTML page."""
    return dedent(
        """\
        <!DOCTYPE html>
        <html lang="{lang}">
        <head>
        <meta charset="utf-8">
        <title>{title}</title>
        <style>
        {css}
        </style>
        </head>
        <body>
        {body}
        </body>
        </html>
        """
    ).format(title=title, lang=lang, css=css, body=body)
71397e60fefab240dbd0b173e437ef90be4b8493
15,033
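Writing a standalone page with minimal_html_page above; the filename is illustrative:

html = minimal_html_page("<h1>Hello</h1>", css="h1 { color: teal; }", title="Demo")
with open("demo.html", "w", encoding="utf-8") as f:
    f.write(html)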
def _common_strategies(choices):
    """Generate some common strategies to deal with multiple references."""
    return {'min': min(choices),
            'max': max(choices),
            'avg': sum(choices) * 1. / len(choices)
            }
ffe0070667aabe2ab8072bb39a6e452341780c41
15,034
import re


def tokenize(text, regex=r'[a-zA-Z]+'):
    """Split text into tokens using a regular expression

    :param text: text to be tokenized
    :param regex: regular expression used to match tokens using re.findall
    :return: a list of resulting tokens

    >>> tokenize('the rain in spain')
    ['the', 'rain', 'in', 'spain']
    """
    # note: the original default '[a-zA-z]+' also matched the punctuation
    # characters that sit between 'Z' and 'a' in ASCII
    return re.findall(regex, text, flags=re.IGNORECASE)
c7daee14ec14ff4a22d42883f3b3e3827924389e
15,036
from typing import Iterable


def concatenate_lists(*list_of_lists: Iterable) -> list:
    """Combines the Iterables provided as arguments into one list.

    Examples
    --------
    Normal usage::

        concatenate_lists([1, 2], [3, 4], [5])
        # returns [1, 2, 3, 4, 5]
    """
    return [item for lst in list_of_lists for item in lst]
e630fd31888753814e3f486c02b0fc1e67f269ef
15,041
def is_attr_pattern(text):
    r"""Attribute patterns are like '[title=Version \d.*]' """
    return text.startswith('[')
05978edb1d6589f47a96ab6cb2da4b7f0e3f2569
15,044
def strip_nondigits(string):
    """
    Return a string containing only the digits of the input string.
    """
    return ''.join([c for c in string if c.isdigit()])
564a05de12c61a09a6d07e13b13279908128389a
15,049
def parse_begins(msg):
    """Parse the guard ID out of the "begins his shift" message."""
    words = msg.split()
    return int(words[1][1:])
b07e3d741038365dddbacfdb70d219ef1bf007d8
15,052
import pickle


def load_pickle(file_name):
    """
    load a pickle object from the given pickle file

    :param file_name: path to the pickle file
    :return: obj => read pickle object
    """
    with open(file_name, "rb") as pick:
        obj = pickle.load(pick)
    return obj
e50ae7ccd4b72700e5079774c45ec91b240bc88a
15,055
def ref_str_to_tuple(ref):
    """String like ' a : b ' to tuple like ('a', 'b')."""
    return tuple(x.strip() for x in ref.split(':'))
fc2e467f054d2b53a580f1d0917d01eda9ba1727
15,067
def bin_append(a, b, length=None):
    """
    Appends number a to the left of b

    bin_append(0b1, 0b10) = 0b110
    """
    length = length or b.bit_length()
    return (a << length) | b
c2d3132532b1d9311d5b6eef94289c0763422665
15,069
def levenshtein_distance(word_1, word_2):
    """
    Calculates the levenshtein distance (= the number of letters to add/
    substitute/interchange in order to pass from word_1 to word_2)
    """
    array = [[0 for i in range(len(word_2) + 1)] for y in range(len(word_1) + 1)]
    for i in range(len(word_1) + 1):
        array[i][0] = i
    for j in range(len(word_2) + 1):
        array[0][j] = j
    for i in range(1, len(word_1) + 1):
        for j in range(1, len(word_2) + 1):
            cost = 0 if word_1[i - 1] == word_2[j - 1] else 1
            array[i][j] = min(
                array[i - 1][j] + 1,
                array[i][j - 1] + 1,
                array[i - 1][j - 1] + cost
            )
    return array[len(word_1)][len(word_2)]
ce43e60454b59c3c1323656636f457bc192a2c67
15,070
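A couple of classic checks for levenshtein_distance above (illustrative):

assert levenshtein_distance("kitten", "sitting") == 3  # s/k, i/e, +g
assert levenshtein_distance("flaw", "lawn") == 2
assert levenshtein_distance("same", "same") == 0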
import inspect


def get_source_link(obj, page_info):
    """
    Returns the link to the source code of an object on GitHub.
    """
    package_name = page_info["package_name"]
    version = page_info.get("version", "master")
    base_link = f"https://github.com/huggingface/{package_name}/blob/{version}/src/"
    module = obj.__module__.replace(".", "/")
    line_number = inspect.getsourcelines(obj)[1]
    return f"{base_link}{module}.py#L{line_number}"
86748f179e44cec37efd88e1b8dc5de0c6268631
15,071
def concat_environment(env1, env2):
    """
    Concatenate two environments.

    1 - Check duplicated keys and concatenate their values.
    2 - Update the concatenated environment.

    Parameters
    ----------
    env1: dict (mandatory)
        First environment.
    env2: dict (mandatory)
        Second environment.

    Returns
    -------
    concat_env: dict
        Updated environment where the duplicated keys values are
        concatenated with ':'.
    """
    concat_env = dict(env1)  # copy so the input mapping is not mutated
    for key, value in env2.items():
        if key in concat_env:
            if value != concat_env[key]:
                concat_env[key] += ":" + env2[key]
        else:
            concat_env[key] = env2[key]
    return concat_env
25f7aee5a9316ab0604f2e38538a1f67a5333b08
15,072
import itertools


def get_emin_emax(self):
    """
    Finds how much the Ek_grid has to be expanded above the bandwidth D of
    the leads.

    Parameters
    ----------
    self : Approach2vN
        Approach2vN object.
    self.funcp.emin : float
        (Modifies) Minimal energy in the updated Ek_grid.
    self.funcp.emax : float
        (Modifies) Maximal energy in the updated Ek_grid.
    """
    # (E, si, dband) = (self.qd.Ea, self.si, self.leads.dlst[0,1])
    (E, si, dmin, dmax) = (self.qd.Ea, self.si, self.funcp.dmin, self.funcp.dmax)
    lst = [dmin, dmax]
    for charge in range(si.ncharge):
        for b, bp in itertools.product(si.statesdm[charge], si.statesdm[charge]):
            lst.append(dmax - E[b] + E[bp])
            lst.append(dmin - E[b] + E[bp])
    for charge in range(si.ncharge - 2):
        for d, b in itertools.product(si.statesdm[charge + 2], si.statesdm[charge]):
            lst.append(dmin + E[d] - E[b])
            lst.append(dmax + E[d] - E[b])
    self.funcp.emax = max(lst)
    self.funcp.emin = min(lst)
    return 0
102198b413f190264e231c80f896045fc2eb3f58
15,076
import re


def substitute_pattern_with_char(s, pattern, repl_char='x'):
    r"""
    This is a little different than re.sub(). It replaces all the
    characters that match the pattern with an equal number of `repl_char`
    characters. The resulting string should be the same length as the
    starting string.

    >>> substitute_pattern_with_char(s='Hi there', pattern=r'[a-z]+', repl_char='x')
    'Hx xxxxx'

    >>> substitute_pattern_with_char(s='With 42 cats', pattern=r'[\d]+', repl_char='x')
    'With xx cats'

    >>> substitute_pattern_with_char(s='With 42 cats and 12 dogs', pattern=r'[\d]+', repl_char='x')
    'With xx cats and xx dogs'

    >>> substitute_pattern_with_char(s='With 42 cats and 12 dogs', pattern=r'[\d]+\s+(cat[s]?|bird[s]?)', repl_char='x')
    'With xxxxxxx and 12 dogs'
    """
    for mo in re.finditer(pattern=pattern, string=s):
        m = mo.group(0)
        s = s.replace(m, ''.join([repl_char for i in range(0, len(m))]))
    return s
98d7f3d642430a5211aa7396a3454d979057ebef
15,077
def extract_table_name(file_name: str) -> str:
    """Extract the table name from the filename

    Assumes the name of the bigquery table the data is being inserted to
    is the first word of the filename

    Examples:
        extract_table_name("properties/properties_09.csv")
        >>> "properties"
    """
    return file_name.split("/")[0]
1cac0a58325651abff1399974df52aaefa93585f
15,079
def avg(arr):
    """Count average."""
    return sum(arr) / float(len(arr))
0da964cdb1d14154b4569d010b4caa710a30fd84
15,084
from typing import Any


def get_full_name(obj: Any) -> str:
    """Returns identifier name for the given callable.

    Should be equal to the import path:
        obj == import_object(get_full_name(obj))

    Parameters
    ----------
    obj : object
        The object to find the classpath for.

    Returns
    -------
    The object's classpath.
    """
    if callable(obj):
        return obj.__module__ + '.' + obj.__qualname__
    else:
        return obj.__class__.__module__ + '.' + obj.__class__.__qualname__
6f83f808f8c4d226b1d26365adc75f5cb6c4e28f
15,087
def estimate_microturbulence(effective_temperature, surface_gravity):
    """
    Estimate microturbulence from relations between effective temperature
    and surface gravity. For giants (logg < 3.5) the relationship employed
    is from Kirby et al. (2008, ) and for dwarfs (logg >= 3.5) the Reddy
    et al. (2003) relation is used.

    :param effective_temperature:
        The effective temperature of the star in Kelvin.
    :type effective_temperature:
        float
    :param surface_gravity:
        The surface gravity of the star.
    :type surface_gravity:
        float
    :returns:
        The estimated microturbulence (km/s) from the given stellar parameters.
    :rtype:
        float
    """
    if surface_gravity >= 3.5:
        return 1.28 + 3.3e-4 * (effective_temperature - 6000) \
            - 0.64 * (surface_gravity - 4.5)
    else:
        return 2.70 - 0.509 * surface_gravity
bf54342e00fc61f042f183c8bbebc01005eb6b4c
15,089
import re


def messagestat_formatting(messageBody):
    """
    Formats the message body for messagestat functions. This mostly
    involves replacing spaces with underscores and doubling up on
    quotations so that SQL server ignores them
    """
    messageBody = re.sub(' ', '_', messageBody)
    messageBody = re.sub("'", "''", messageBody)
    if messageBody.startswith('"') and messageBody.endswith('"'):
        messageBody = messageBody[1:]
        messageBody = messageBody[:-1]
    return messageBody
f91a8fe73b336f97be8322c6b5034013090e0807
15,099
def _ngrams_from_tokens(tokens, n, join=True, join_str=' '):
    """
    Helper function to produce ngrams of length `n` from a list of string
    tokens `tokens`.

    :param tokens: list of string tokens
    :param n: size of ngrams
    :param join: if True, join each ngram by `join_str`, i.e. return list
        of ngram strings; otherwise return list of ngram lists
    :param join_str: if `join` is True, use this string to join the parts
        of the ngrams
    :return: return list of ngram strings if `join` is True, otherwise
        list of ngram lists
    """
    if n < 2:
        raise ValueError('`n` must be at least 2')

    if len(tokens) == 0:
        return []

    if len(tokens) < n:
        # raise ValueError('`len(tokens)` should not be smaller than `n`')
        ngrams = [tokens]
    else:
        ngrams = [[tokens[i + j] for j in range(n)] for i in range(len(tokens) - n + 1)]

    if join:
        return list(map(lambda x: join_str.join(x), ngrams))
    else:
        return ngrams
39afd35a4e715ea3e358a81d3a557eb1f0f0310a
15,103
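Example output from _ngrams_from_tokens above (illustrative):

tokens = ["the", "rain", "in", "spain"]
print(_ngrams_from_tokens(tokens, n=2))
# ['the rain', 'rain in', 'in spain']
print(_ngrams_from_tokens(tokens, n=3, join=False))
# [['the', 'rain', 'in'], ['rain', 'in', 'spain']]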
def parse_prior_params(bt_config, code_config, key, default, prior='pre'):
    """
    Parse parameters with priority.

    Args:
        bt_config(dict): bt config
        code_config(dict): code config
        key(string): parameter name
        default(default): default value
        prior(string): use bt_config in prior if 'pre' otherwise code_config

    Returns:
        value
    """
    if prior == 'pre':
        return bt_config.get(key, code_config.get(key, default))
    else:
        return code_config.get(key, bt_config.get(key, default))
beee29f04562173d18cc95b2b49683dee02fbca8
15,104
def entity_tostring(entity):
    """Converts one GNL (Google Natural Language) entity to a readable string."""
    metadata = ", ".join(['"%s": "%s"' % (key, value) for
                          key, value in entity.metadata.items()])
    mentions = ", ".join(['"%s"' % mention for mention in entity.mentions])
    return ('{name: "%s",'
            ' type: "%s",'
            ' metadata: {%s},'
            ' salience: %s,'
            ' mentions: [%s]}') % (
        entity.name,
        entity.type,
        metadata,
        entity.salience,
        mentions)
dd3e30247e36186e6eccfe1e32f8f31bf3577660
15,108
import networkx


def graph_from_dict(d):
    """
    Creates a NetworkX DiGraph from a dictionary

    Parameters
    ----------
    d : dict

    Returns
    -------
    Graph: NetworkX DiGraph

    Examples
    --------
    >>> g = graph_from_dict({'a':['b'], 'b':['c', 'd'], 'c':[], 'd':[], 'e':['d']})
    """
    g = networkx.DiGraph()
    for key, children in d.items():
        for child in children:
            g.add_edge(key, child)
    return g
e0029b6018ff840bd2f314038c25f41e025600a7
15,110
def dectobin(dec_string):
    """Convert a decimal string to binary string"""
    bin_string = bin(int(dec_string))
    return bin_string[2:]
5f02507ae5e7ab855eceb7a5908347060b46a400
15,111
def underscorize(camelcased):
    """
    Takes a CamelCase string and returns a separated_with_underscores
    version of that name in all lower case. If the name is already all in
    lower case and/or separated with underscores, then the returned string
    is identical to the original. This function is used to take CStruct
    class names and determine the names of their handler methods.

    Here are some example conversions:

        underscorize("SomeStruct")   == "some_struct"
        underscorize("SSNLookup")    == "ssn_lookup"
        underscorize("RS485Adaptor") == "rs485_adaptor"
        underscorize("Rot13Encoded") == "rot13_encoded"
        underscorize("RequestQ")     == "request_q"
        underscorize("John316")      == "john316"
    """
    underscored, prev = "", ""
    for i, c in enumerate(camelcased):
        if (prev and not c.islower() and c != "_"
                and (prev.islower() and not c.isdigit()
                     or c.isupper() and camelcased[i + 1:i + 2].islower())):
            underscored += "_"
        underscored += c.lower()
        prev = c
    return underscored
b0f2622c105c09502aa984e15cf1b61ac12a608b
15,112
def best_validation_rows(log_df, valid_col='valid_accuracy', second_criterion='iterations_done'):
    """
    Takes a dataframe created by scripts/logs_to_dataframe.py and returns
    a dataframe containing the best-validation row for each log.
    """
    return log_df.sort_values([valid_col, second_criterion],
                              ascending=False).drop_duplicates(['log'])
9541adb6653a93bfe0385bd24beedf80b065dde7
15,114
def getMsgTime(line):
    """Parse the timestamp off the 978 message and return as a float.

    Args:
        line (str): Line containing a 978 message with a timestamp at the end.

    Returns:
        float: Timestamp of the message.

    Raises:
        Exception: If the timestamp can't be found.
    """
    payloadTimeIndex = line.find(';t=')
    if payloadTimeIndex == -1:
        raise Exception('Illegal time format')

    timeInSecs = float(line[payloadTimeIndex + 3:-1])
    return timeInSecs
860a02a28d154357d6fcc61de182fee1f4875aaa
15,121
def ordinalize(given_number: int) -> str:
    """Ordinalize the number from the given number

    Args:
        given_number (int): integer number

    Example:
        >>> ordinalize(34)
        '34th'

    Returns:
        str: string in ordinal form
    """
    suffix = ["th", "st", "nd", "rd"]
    thenum = int(given_number)
    # check the last two digits so 111, 112, 113 still get 'th'
    if thenum % 10 in [1, 2, 3] and thenum % 100 not in [11, 12, 13]:
        return f'{thenum}{suffix[thenum % 10]}'
    else:
        return f'{thenum}{suffix[0]}'
e0b43b3b8353e9e79d2f13a36198782a2a5dcd73
15,126
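Edge-case checks for ordinalize above, including the hundreds that motivated the % 100 guard (illustrative):

for n, expected in [(1, '1st'), (22, '22nd'), (13, '13th'), (111, '111th'), (103, '103rd')]:
    assert ordinalize(n) == expected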
def get_filenames(hda_dict):
    """
    Generates a list of filenames taken from the results dictionary,
    retrieved with the function request_results_list.

    Parameters:
        hda_dict: dictionary initialized with the function init, that
            stores all required information to be able to interact with
            the HDA API

    Returns:
        Returns a list of filenames for each entry stored in the dictionary
        returned by the function request_results_list.
    """
    fileName = []
    for file in hda_dict['results']['content']:
        fileName.append(file['filename'])
    return fileName
c14911b14fa2b31f061b4420875c603d8acce5c1
15,127
def is_copula_relation(token1, token2):
    """Return True if `token1` is a copula dependent of `token2`.

    We don't want to capture cases where `token2` is an adjective, because
    we capture those in `is_predicated_adjective_relation()`.
    """
    return (
        (token1.deprel == "cop")
        and (token2.upos != "ADJ")
        and (token1.head == token2.id)
    )
35e7b19c3cf1662c09a8fd80c8099073de11bc51
15,133
def _safe_decr(line_num):
    """
    Return @line_num decremented by 1, if @line_num is not None, else None.
    """
    if line_num is not None:
        return line_num - 1
ad6092b68240f39ccba13fda44bbf8af22d126f4
15,137
def _build_verbose_results(ret, show_success):
    """
    Helper function that builds the results to be returned when the verbose
    parameter is set
    """
    verbose_results = {'Failure': []}

    for tag_data in ret.get('Failure', []):
        tag = tag_data['tag']
        verbose_results['Failure'].append({tag: tag_data})

    verbose_results['Success'] = []
    for tag_data in ret.get('Success', []):
        tag = tag_data['tag']
        verbose_results['Success'].append({tag: tag_data})

    if not show_success and 'Success' in verbose_results:
        verbose_results.pop('Success')

    verbose_results['Controlled'] = []
    for tag_data in ret.get('Controlled', []):
        tag = tag_data['tag']
        verbose_results['Controlled'].append({tag: tag_data})

    if not verbose_results['Controlled']:
        verbose_results.pop('Controlled')

    return verbose_results
53f5ad2525893e7277014664555894f51c601d4d
15,143
def get_vpsa(session, vpsa_id, return_type=None, **kwargs):
    """
    Retrieves details for a single VPSA.

    :type session: zadarapy.session.Session
    :param session: A valid zadarapy.session.Session object. Required.

    :type vpsa_id: int
    :param vpsa_id: The VPSA 'id' value as returned by get_all_vpsas. For
        example: '2653'. Required.

    :type return_type: str
    :param return_type: If this is set to the string 'json', this function
        will return a JSON string. Otherwise, it will return a Python
        dictionary. Optional (will return a Python dictionary by default).

    :rtype: dict, str
    :returns: A dictionary or JSON data set as a string depending on
        return_type parameter.
    """
    if type(vpsa_id) is int:
        vpsa_id = str(vpsa_id)

    if not vpsa_id.isdigit():
        raise ValueError('The VPSA ID should be a positive integer.')

    path = '/api/vpsas/{0}.json'.format(vpsa_id)
    return session.get_api(path=path, return_type=return_type, **kwargs)
9b2c33c7ae772cf602528e10618117ccc12dfc6e
15,157