content: string (35 – 416k chars) · sha1: string (40 chars) · id: int64 (0 – 710k)
def concat_files(*files):
    """
    Concat some files together.
    Returns out and err to keep parity with shell commands.

    Args:
        *files: src1, src2, ..., srcN, dst.

    Returns:
        out: string
        err: string
    """
    out = ''
    err = ''
    dst_name = files[-1]
    sources = files[:-1]
    with open(dst_name, 'w') as dst:
        for f in sources:
            with open(f, 'r') as src:
                for line in src:
                    dst.write(line)
    return out, err
101c37e5b3955c153c8c2210e7575a62341c768a
706,998
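A minimal usage sketch of concat_files above, with throwaway input files:

with open('a.txt', 'w') as f:
    f.write('first\n')
with open('b.txt', 'w') as f:
    f.write('second\n')
out, err = concat_files('a.txt', 'b.txt', 'merged.txt')
assert open('merged.txt').read() == 'first\nsecond\n'
assert (out, err) == ('', '')   # out/err exist only for shell parity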
import os

def join_paths(path, *paths):
    """Join one or more path components, delegating to os.path.join."""
    return os.path.join(path, *paths)
fdd069ba4414831a201192d096cdb7723037d3dc
706,999
def quantize(img):
    """Quantize the output of model.

    :param img: the input image
    :type img: torch.Tensor
    :return: the image after quantize
    :rtype: torch.Tensor
    """
    pixel_range = 255
    return img.mul(pixel_range).clamp(0, 255).round().div(pixel_range)
49abd32d8b2cf54c955e16765602bbff77a2a1b9
707,000
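A short usage sketch of quantize, assuming PyTorch is installed (the function chains torch.Tensor methods, so a tensor input is what actually works):

import torch

img = torch.rand(3, 8, 8)   # values in [0, 1]
q = quantize(img)           # snapped to the 256 representable levels
assert torch.allclose(q * 255, (q * 255).round())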
import subprocess

def get_length(filename):
    """
    Get the length of a specific file with ffprobe from the ffmpeg library

    :param filename: this param is used for the file
    :type filename: str
    :return: length of the given video file
    :rtype: float
    """
    # use ffprobe because it is faster than alternatives (for example moviepy)
    result = subprocess.run(
        ["ffprobe", "-v", "error",
         "-show_entries", "format=duration",
         "-of", "default=noprint_wrappers=1:nokey=1",
         filename],
        stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    return float(result.stdout)
335e220c14612ea5a5d0a330043b75e4d4d1a050
707,002
import os

def is_File(path):
    """Takes a path as argument.
    Returns whether the path points to a regular file, as a bool."""
    return os.path.isfile(path)
63ec104ab50c8644856d980bedf04b101f2730e1
707,003
from pathlib import Path
import os

def change_path(path, dir="", file="", pre="", post="", ext=""):
    """
    Change the path ingredients with the provided directory, filename
    prefix, postfix, and extension

    :param path: original path
    :param dir: new directory
    :param file: filename to replace the filename in path
    :param pre: prefix to be prepended to the filename
    :param post: postfix to be appended to the filename
    :param ext: new extension of the filename
    :return: the modified path
    """
    sep = "\\" if os.name == "nt" else "/"
    path_obj = Path(path)
    old_filename = path_obj.name.replace(path_obj.suffix, "") \
        if len(path_obj.suffix) > 0 else path_obj.name

    if len(dir) > 0:
        directory = dir
    elif path.endswith(sep):
        directory = path[:-1]
        old_filename = ""
    else:
        directory = str(path_obj.parent)

    old_extension = path_obj.suffix
    new_filename = file if len(file) > 0 else old_filename
    new_filename = pre + new_filename if len(pre) > 0 else new_filename
    new_filename = new_filename + post if len(post) > 0 else new_filename
    new_extension = "." + ext if len(ext) > 0 else old_extension
    target = directory + sep + new_filename + new_extension
    return target
b629da207f96f4476d6eda3a1c88b1c63f701742
707,004
def midi_to_chroma(pitch):
    """Given a midi pitch (e.g. 60 == C), returns its corresponding
    chroma class value. A == 0, A# == 1, ..., G# == 11
    """
    return ((pitch % 12) + 3) % 12
25ef72f78269c3f494ca7431f1291891ddea594a
707,005
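A quick worked check of midi_to_chroma (the A == 0 numbering puts C, three semitones above A, at 3):

assert midi_to_chroma(60) == 3   # middle C
assert midi_to_chroma(69) == 0   # A4
assert midi_to_chroma(70) == 1   # A#4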
import re

def _snippet_items(snippet):
    """Return all markdown items in the snippet text.

    For this we expect the snippet to contain *nothing* but a markdown
    list. We do not support "indented" list style, only one item per
    linebreak.

    Raises SyntaxError if snippet is not in the proper format (e.g.
    contains anything other than a markdown list).
    """
    unformatted = snippet.text and snippet.text.strip()
    # treat null text value as empty list
    if not unformatted:
        return []
    # parse out all markdown list items
    items = re.findall(r'^[-*+] +(.*)$', unformatted, re.MULTILINE)
    # if there were any lines that didn't yield an item, assume there was
    # something we didn't parse. since we never want to lose existing data
    # for a user, this is an error condition.
    if len(items) < len(unformatted.splitlines()):
        raise SyntaxError('unparsed lines in user snippet: %s' % unformatted)
    return items
bdeb5b5c5e97ef3a8082b7131d46990de02a59af
707,006
import numpy as np

def apogeeid_digit(arr):
    """
    NAME:
        apogeeid_digit
    PURPOSE:
        Extract digits from apogeeid because it's too painful to deal with
        APOGEE ID in h5py
    INPUT:
        arr (ndarray): apogee_id
    OUTPUT:
        apogee_id with digits only (ndarray)
    HISTORY:
        2017-Oct-26 - Written - Henry Leung (University of Toronto)
    """
    if isinstance(arr, (np.ndarray, list)):
        arr_copy = np.array(arr)  # make a copy
        for i in range(arr_copy.shape[0]):
            arr_copy[i] = str(''.join(filter(str.isdigit, arr_copy[i])))
        return arr_copy
    return str(''.join(filter(str.isdigit, arr)))
48e21ab69c9f733dbf7b612994bfed35b8980424
707,007
def is_text_area(input):
    """
    Template tag to check if the input widget is a textarea
    :param input: Input field
    :return: True if the widget is a Textarea, False if not
    """
    return input.field.widget.__class__.__name__ == "Textarea"
4657a93809e123aaa27ee0a202b33e0383ac23cc
707,009
import os

def after_file_name(file_to_open):
    """
    Given a file name, return it as:
    [file_to_open root]_prep.[file_to_open ending]

    Parameters
    ----------
    file_to_open : string
        Name of the input file.

    Returns
    -------
    after_file : string
        Full path to the (new) file.

    Examples
    --------
    >>> from preparenovonix.novonix_io import after_file_name
    >>> after_file_name('example_data/example_data.csv')
    """
    # Extract the path and file name
    dirname, fname = os.path.split(os.path.abspath(file_to_open))
    root = fname.split(".")[0]
    ending = fname.split(".")[1]
    fname = root + "_prep." + ending
    after_file = os.path.join(dirname, fname)
    return after_file
8b06b3cabbe8dd388cafc2d9d48b30feb2f6c254
707,010
import itertools
import re

def parse_cluster_file(filename):
    """
    Parse the output of the CD-HIT clustering and return a dictionary of
    clusters.

    In order to parse the list of clusters and sequences, we have to parse
    the CD-HIT output file. The following solution is adapted from a small
    wrapper script
    ([source code on Github](https://github.com/Y-Lammers/CD-HIT-Filter/blob/master/CD-HIT-Filter.py),
    author: Youri Lammers).
    """
    # parse through the .clstr file and create a dictionary
    # with the sequences per cluster

    # open the cluster file and set the output dictionary
    cluster_file, cluster_dic = open(filename), {}

    # parse through the cluster file and store the cluster name + sequences
    # in the dictionary. This is a generator comprehension which groups
    # lines together based on whether the line starts with a ">".
    cluster_groups = (x[1] for x in itertools.groupby(
        cluster_file, key=lambda line: line[0] == '>'))

    # Now we get alternate groups of cluster name and sequence list.
    for cluster in cluster_groups:
        # Note: next(cluster) retrieves the first line of the cluster i
        # (>cluster name)
        name = next(cluster).strip()
        name = re.sub(' ', '_', name[1:])
        # Note: next(cluster_groups) retrieves the next cluster i+1
        # containing the sequences; the cluster is itself an iterator
        # (every line)
        seqs = [seq.split('>')[1].split('...')
                for seq in next(cluster_groups)]
        # Write a boolean value True if sequence is the reference sequence
        # from the cluster
        seqs = [[seq[0], (True if seq[1] == ' *\n' else False)]
                for seq in seqs]
        cluster_dic[name] = seqs

    # return the cluster dictionary
    return cluster_dic
d50eaeb926be3a7b8d1139c82142e4a1b595c1a0
707,011
def openTopics():
    """Opens topics file

    :return: list of topics
    """
    topicsFile = 'topics'
    with open(topicsFile) as f:
        topics = f.read().split()
    return topics
e6d43ff6717122532a71355b71134d6f78f9db85
707,012
def process_coins():
    """calculate the amount of money paid based on the coins entered"""
    number_of_quarters = int(input("How many quarters? "))
    number_of_dimes = int(input("How many dimes? "))
    number_of_nickels = int(input("How many nickels? "))
    number_of_pennies = int(input("How many pennies? "))
    quarters = number_of_quarters * 0.25
    dimes = number_of_dimes * 0.10
    nickels = number_of_nickels * 0.05
    pennies = number_of_pennies * 0.01
    total_inserted = quarters + dimes + nickels + pennies
    return total_inserted
6a26ad161720554079a76f6bdadbbf9555d6b82d
707,013
import struct

def xor_string(hash1, hash2, hash_size):
    """Encrypt/Decrypt function used for password encryption in
    authentication, using a simple XOR.

    Args:
        hash1 (bytes): The first hash.
        hash2 (bytes): The second hash.
        hash_size (int): Number of bytes in each hash.

    Returns:
        bytes: A string with the xor applied.
    """
    xored = [h1 ^ h2 for (h1, h2) in zip(hash1, hash2)]
    return struct.pack("{0}B".format(hash_size), *xored)
4efc263a0ff9fb05b0ee7cb7b7b3fdd4c8c0c2ec
707,014
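A small sketch of the XOR round-trip property of xor_string, with arbitrary 4-byte values (in Python 3 iterating bytes yields ints, which is what the ^ above needs):

a = b'\x01\x02\x03\x04'
k = b'\xff\x00\xff\x00'
c = xor_string(a, k, 4)
assert xor_string(c, k, 4) == a   # XORing with the same key twice restores the input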
import base64

def decode_password(base64_string: str) -> str:
    """
    Decode a base64 encoded string.

    Args:
        base64_string: str
            The base64 encoded string.

    Returns:
        str
            The decoded string.
    """
    base64_bytes = base64_string.encode("ascii")
    sample_string_bytes = base64.b64decode(base64_bytes)
    return sample_string_bytes.decode("ascii")
0f04617c239fbc740a9b4c9c2d1ae867a52e0c74
707,015
def to_pascal_case(value):
    """
    Converts the value string to PascalCase.

    :param value: The value that needs to be converted.
    :type value: str
    :return: The value in PascalCase.
    :rtype: str
    """
    return "".join(character for character in value.title()
                   if not character.isspace())
138ab9ddf7ca814b50bf8ff0618de03b236535c7
707,016
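For instance:

assert to_pascal_case("hello world") == "HelloWorld"
assert to_pascal_case("snake case value") == "SnakeCaseValue"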
from typing import Any, Iterable, List

def drop(n: int, it: Iterable[Any]) -> List[Any]:
    """
    Return a list of the elements remaining after dropping N from the head
    of the iterable object

    Args:
        n: Number to drop from the top
        it: Iterable object

    Examples:
        >>> fpsm.drop(3, [1, 2, 3, 4, 5])
        [4, 5]
    """
    return list(it)[n:]
0732bd560f0da0a43f65ee3b5ed46fd3a05e26f5
707,017
def csv_args(value):
    """Parse a CSV string into a Python list of strings.

    Used in command line parsing."""
    # list() is needed in Python 3, where map() returns a lazy iterator
    # although the docstring promises a list
    return list(map(str, value.split(",")))
b2596180054f835bfe70e3f900caa5b56a7856a6
707,018
import os
import subprocess

def linux_compute_tile_singlecore(optimsoc_buildroot):
    """
    Module-scoped fixture: build a Linux image for a single-core compute tile
    """
    # Get the buildroot base directory from the optimsoc_buildroot() fixture.
    # Note that this directory is cached between pytest runs. Make sure the
    # commands executed as part of this test can deal with that and rebuild
    # artifacts as needed.
    src_optimsoc_buildroot = optimsoc_buildroot.join('optimsoc-buildroot')
    src_buildroot = optimsoc_buildroot.join('buildroot')

    config = 'optimsoc_computetile_singlecore_defconfig'

    # buildroot doesn't like our OpTiMSoC compiler being in the path. Error is:
    # ---
    # You seem to have the current working directory in your
    # LD_LIBRARY_PATH environment variable. This doesn't work.
    # support/dependencies/dependencies.mk:21: recipe for target 'dependencies' failed
    # ---
    env = dict(os.environ, LD_LIBRARY_PATH='',
               PATH='/bin:/usr/bin:/usr/local/bin')

    cmd = ['make', '-C', str(src_buildroot),
           'BR2_EXTERNAL=' + str(src_optimsoc_buildroot), config]
    subprocess.check_output(cmd, env=env, stderr=subprocess.STDOUT,
                            universal_newlines=True)

    cmd = ['make', '-C', str(src_buildroot)]
    env = dict(os.environ, LD_LIBRARY_PATH='')
    subprocess.check_output(cmd, env=env, stderr=subprocess.STDOUT,
                            universal_newlines=True)

    linux_img = src_buildroot.join('output/images/vmlinux')
    return linux_img
9eec134fb48c678eb18a25290d82427648c8ea31
707,019
def reverse(sequence):
    """Return the reverse of any sequence"""
    return sequence[::-1]
f08ae428844347e52d8dbf1cd8ad07cfbf4ef597
707,020
import os

def get_table_name(yaml_path):
    """gives how the yaml file name should be in the sql query"""
    table_name = os.path.basename(yaml_path)
    table_name = os.path.splitext(table_name)[0]
    return table_name
5181e1e68a844bc529573da02a78f034092def46
707,021
def check(verbose=1):
    """
    Runs a couple of functions to check the module is working.

    :param verbose: 0 to hide the standard output
    :return: list of dictionaries, result of each test
    """
    return []
4ecf144fc64a165b5b0f9766b76eb6b703eba130
707,022
def s3_example_tile(gtiff_s3):
    """Example tile for fixture."""
    return (5, 15, 32)
a4b7e35fc6f7bf51a551ac8cb18003c23ff35a01
707,023
def kwargs_to_flags(**kwargs):
    """Convert `kwargs` to flags to pass on to CLI."""
    flag_strings = []
    for (key, val) in kwargs.items():
        if isinstance(val, bool):
            if val:
                flag_strings.append(f"--{key}")
        else:
            flag_strings.append(f"--{key}={val}")
    return " ".join(flag_strings)
aa672fe26c81e7aaf8a6e7c38354d1649495b8df
707,025
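For example (note kwargs_to_flags drops a False boolean entirely rather than emitting --flag=False):

assert kwargs_to_flags(verbose=True, seed=42, debug=False) == "--verbose --seed=42"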
def A12_6_3_2(FAxial, eta, Pp, Pu, Muey, Muez, Muay, Muaz,
              Ppls, Mby, Mbz, GammaRPa, GammaRPb):
    """
    A.12.6.3.2 Interaction equation approach

    where:
    Pu   is the applied axial force in a member due to factored actions,
         determined in an analysis that includes Pu effects (see A.12.4);
    Ppls is the representative local axial strength of a non-circular
         prismatic member;
    Pp   is the representative axial strength of a non-circular prismatic
         member;
    Muey is the corrected bending moment due to factored actions about the
         member y-axis from A.12.4;
    Muez is the corrected bending moment due to factored actions about the
         member z-axis from A.12.4;
    Muay is the amplified bending moment due to factored actions about the
         member y-axis from A.12.4;
    Muaz is the amplified bending moment due to factored actions about the
         member z-axis from A.12.4;
    Mby  is the representative bending moment strength about the member
         y-axis, as defined in A.12.6.2.5 or A.12.6.2.6.
    """
    # Each non-circular prismatic structural member should satisfy the
    # following conditions in Equations (A.12.6-38) to (A.12.6-40) at all
    # cross-sections along its length. When the shear due to factored
    # actions is greater than 60 percent of the shear strength, the
    # bending moment strength should be reduced parabolically to zero when
    # the shear equals the shear strength (Pv in A.12.6.3.4).
    #
    # Local strength check (for all members), Equation (A.12.6-38):
    _UR1 = ((GammaRPa * Pu / Ppls)
            + pow(pow(GammaRPb * Muey / Mby, eta)
                  + pow(GammaRPb * Muez / Mbz, eta), 1.0 / eta))
    print("")
    print("A.12.6.3.2 Interaction equation approach")
    print("Uint [Local strength check ] = {: 1.4f}".format(_UR1))

    _UR2 = 0
    if FAxial == 'compression':
        # beam-column check (for members subject to axial compression):
        if GammaRPa * Pu / Pp > 0.20:
            # after AISC [A.12.5-1], Equation H1-1a (A.12.6-39)
            _UR2 = ((GammaRPa * Pu / Pp)
                    + (8.0 / 9.0) * pow(pow(GammaRPb * Muay / Mby, eta)
                                        + pow(GammaRPb * Muaz / Mbz, eta),
                                        1.0 / eta))
        else:
            # after AISC [A.12.5-1], Equation H1-1b (A.12.6-40)
            _UR2 = ((GammaRPa * Pu / (2.0 * Pp))
                    + pow(pow(GammaRPb * Muay / Mby, eta)
                          + pow(GammaRPb * Muaz / Mbz, eta), 1.0 / eta))
        print("Uint [beam-column check ] = {: 1.4f}".format(_UR2))
    print("-----------------")
    return _UR1, _UR2
7a36ec489681100f99563f9c336df1306363851d
707,026
def _groupby_clause(uuid=None, owner=None, human_name=None,
                    processing_name=None):
    """
    Build the groupby clause. Simply detect which fields are set, and
    group by those.

    Args:
        uuid:
        owner:
        human_name:
        processing_name:

    Returns:
        (str): "field, ..., field"
    """
    gbc = ''
    clauses = []
    if uuid is not None:
        clauses.append('uuid')
    if owner is not None:
        clauses.append('owner')
    if human_name is not None:
        clauses.append('human_name')
    if processing_name is not None:
        clauses.append('processing_name')
    if len(clauses) > 0:
        gbc = ','.join(clauses)
    return gbc
21546efa19e841661ed3a7ad8a84cf9a9a76d416
707,027
def has_numbers(input_str: str):
    """
    Check if a string has a number character
    """
    return any(char.isdigit() for char in input_str)
5038cb737cdcfbad3a7bd6ac89f435559b67cebc
707,028
def create_trackhub_resource(project_dir, api_client, create_user_resource,
                             create_genome_assembly_dump_resource):
    """
    This fixture is used to create a temporary trackhub using the POST API.
    The created trackhub will be used to test the GET API
    """
    _, token = create_user_resource
    api_client.credentials(HTTP_AUTHORIZATION='Token ' + str(token))
    submitted_hub = {
        'url': 'file:///' + str(project_dir) + '/' +
               'samples/JASPAR_TFBS/hub.txt'
    }
    response = api_client.post('/api/trackhub/', submitted_hub, format='json')
    return response
a81db1e7c9c95355457d9f6c4ec4c6428e1a77a7
707,029
def auth_test():
    """
    Tests that the endpoint authentication works.

    :return:
    """
    return "hello"
7c65897d83b0af41307aec28d7f2ce3d6852f8b7
707,030
import subprocess
import tempfile
import pipes

def run_exkeys(hosts, capture=False):
    """
    Runs gpssh-exkeys for the given list of hosts. If capture is True, the
    (returncode, stdout, stderr) from the gpssh-exkeys run is returned;
    otherwise an exception is thrown on failure and all stdout/err is
    untouched.
    """
    host_opts = []
    for host in hosts:
        host_opts.extend(['-h', host])

    args = ['gpssh-exkeys', '-v'] + host_opts

    if not capture:
        subprocess.check_call(args)
        return

    # Capture stdout/err for later use, while routing it through tee(1) so
    # that developers can still see the live stream output.
    #
    # XXX This is a very heavy-weight solution, using pipes.Template() for
    # the creation of shell pipeline processes. It's also platform-specific
    # as it relies on the functionality of /dev/stdout and /dev/stderr.
    #
    # The overview: we open up two shell processes running tee(1), using
    # pipes.Template(), and connect their standard output to the stdout/err
    # of the current Python process using Template.open(). We then connect
    # the stdout/stderr streams of subprocess.call() to the stdin of those
    # tee pipelines. tee(1) will duplicate all output to temporary files,
    # which we read after the subprocess call completes.
    # NamedTemporaryFile() then cleans up those files when we return.
    with tempfile.NamedTemporaryFile() as temp_out, \
            tempfile.NamedTemporaryFile() as temp_err:
        pipe_out = pipes.Template()
        pipe_out.append('tee %s' % pipes.quote(temp_out.name), '--')

        pipe_err = pipes.Template()
        pipe_err.append('tee %s' % pipes.quote(temp_err.name), '--')

        with pipe_out.open('/dev/stdout', 'w') as out, \
                pipe_err.open('/dev/stderr', 'w') as err:
            ret = subprocess.call(args, stdout=out, stderr=err)

        stored_out = temp_out.read()
        stored_err = temp_err.read()

    return ret, stored_out, stored_err
1ecb634e76b2ed68966457a0baac2122da00270f
707,031
def rename_group(str_group2=None):
    """
    Rename OFF food group (pnns_group_2) to a standard name

    Args:
        str_group2 (str): OFF food group name

    Returns:
        conv_group (str): standard food group name
    """
    # convert_group1 = {'Beverage': ['Beverages'],
    #                   'Cereals': ['Cereals and potatoes'],
    #                   'Meal': ['Composite foods'],
    #                   'Fat': ['Fat and sauces'],
    #                   'Meat': ['Fish Meat Eggs'],
    #                   'Fruits and vegetables': ['Fruits and vegetables',
    #                                             'fruits-and-vegetables'],
    #                   'Dairy': ['Milk and dairy products'],
    #                   'Snack': ['Salty snacks', 'Sugary snacks',
    #                             'sugary-snacks'],
    #                   None: [None, 'unknown', '']}
    convert_group2 = {
        'Beverage': ['Alcoholic beverages', 'Artificially sweetened beverages',
                     'Fruit juices', 'Fruit nectars', 'Non-sugared beverages',
                     'Sweetened beverages'],
        'Cereals': ['Bread', 'Breakfast cereals', 'Cereals', 'Legumes',
                    'Patatoes'],
        'Meal': ['One-dish meals', 'Pizza pies and quiche', 'Sandwich'],
        'Fat': ['Dressings and sauces', 'Fats'],
        'Meat': ['Tripe dishes', 'Eggs', 'Fish and seafood', 'Meat',
                 'Processed meat', 'Nuts'],
        'Fruit': ['Fruits', 'fruits', 'Dried fruits'],
        'Vegetable': ['Soups', 'Vegetables', 'vegetables'],
        'Dairy': ['Cheese', 'Dairy desserts', 'Ice cream', 'Milk and yogurt'],
        'Snack': ['Appetizers', 'Salty and fatty products',
                  'Biscuits and cakes', 'Chocolate products', 'Sweets',
                  'pastries'],
        None: [None, 'unknown', '']}
    conv_group = [key for (key, value) in convert_group2.items()
                  if str_group2 in value]
    conv_group = [None] if not conv_group else conv_group
    return conv_group[0]
31b52f600fe3a087f8b230c880ae55f0dd63264e
707,032
def read_cat_file(genomeCatFile):
    """Read in genome categories and create dictionary of category name
    and genomes in that category"""
    inFile = open(genomeCatFile, 'r')
    catDict = {}
    for line in inFile:
        line = line.strip()
        entries = line.split()
        genome = entries[0]
        cat = entries[1]
        if cat in catDict:
            catDict[cat].add(genome)
        else:
            catDict[cat] = {genome}
    inFile.close()
    return catDict
23a30f29cb62d56a3e0763be34cad45717421815
707,034
import collections

def get_gradients_through_compute_gradients(optimizer, loss, activations):
    """Compute gradients to send to TPU embedding.

    Args:
        optimizer: a subclass of optimizer.Optimizer, usually
            CrossShardOptimizer. Used to call compute_gradients().
        loss: a Tensor to call optimizer.compute_gradients() on.
        activations: an OrderedDict mapping feature_name to Tensors of
            activations.

    Returns:
        An OrderedDict mapping from feature name Strings to Tensors of
        gradients of the loss wrt the activations of the features.
    """
    activation_list = activations.values()
    grads_and_vars = optimizer.compute_gradients(loss, activation_list)
    grads = [grad for grad, _ in grads_and_vars]
    feature_to_gradient_dict = collections.OrderedDict(
        zip(activations.keys(), grads))
    return feature_to_gradient_dict
2a2ebca1e6024e11f541e3ccaf1fee4acd7ab745
707,035
def AdditionalMedicareTax(e00200, MARS, AMEDT_ec, sey, AMEDT_rt,
                          FICA_mc_trt, FICA_ss_trt,
                          ptax_amc, payrolltax):
    """
    Computes Additional Medicare Tax (Form 8959) included in payroll taxes.

    Notes
    -----
    Tax Law Parameters:
        AMEDT_ec : Additional Medicare Tax earnings exclusion
        AMEDT_rt : Additional Medicare Tax rate
        FICA_ss_trt : FICA Social Security tax rate
        FICA_mc_trt : FICA Medicare tax rate

    Taxpayer Characteristics:
        e00200 : Wages and salaries
        sey : Self-employment income

    Returns
    -------
    ptax_amc : Additional Medicare Tax
    payrolltax : payroll tax augmented by Additional Medicare Tax
    """
    line8 = max(0., sey) * (1. - 0.5 * (FICA_mc_trt + FICA_ss_trt))
    line11 = max(0., AMEDT_ec[MARS - 1] - e00200)
    ptax_amc = AMEDT_rt * (max(0., e00200 - AMEDT_ec[MARS - 1]) +
                           max(0., line8 - line11))
    payrolltax += ptax_amc
    return (ptax_amc, payrolltax)
de0e35fbe5c7c09de384e1302cba082149ea5930
707,036
import copy

def append_step_list(step_list, step, value, go_next, mode, tag):
    """From step_list, append the number of times a step needs to be
    repeated if runmode or retry is present

    :Arguments:
        step_list = Ordered list of steps to be executed
        step = Current step
        value = attempts in runmode/retry
        go_next = value of the real next step
        mode = runmode or retry
        tag = In runmode it is value, in retry it is count

    :Return:
        step_list = New step list formed by appending the replicated steps
    """
    for i in range(0, value):
        copy_step = copy.deepcopy(step)
        copy_step.find(mode).set(tag, go_next)
        copy_step.find(mode).set("attempt", i + 1)
        copy_step.find(mode).set(mode + "_val", value)
        step_list.append(copy_step)
    return step_list
b8b5b3614fea0709b484df087ffa3ee2861532c4
707,037
def get_parameter(dbutils, parameter_name: str, default_value='') -> str:
    """Creates a text widget and gets the parameter value. If run from
    ADF, the value is taken from there."""
    dbutils.widgets.text(parameter_name, default_value)
    return dbutils.widgets.get(parameter_name)
cf8359e6acea68ea26e24cc656847e5560019bd1
707,039
def aic(llf, nobs, df_modelwc):
    """
    Akaike information criterion

    Parameters
    ----------
    llf : {float, array_like}
        value of the loglikelihood
    nobs : int
        number of observations
    df_modelwc : int
        number of parameters including constant

    Returns
    -------
    aic : float
        information criterion

    References
    ----------
    https://en.wikipedia.org/wiki/Akaike_information_criterion
    """
    return -2.0 * llf + 2.0 * df_modelwc
3940c1c86325630248fdf4a50c2aa19b4f4df623
707,040
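A quick worked example of AIC = -2*llf + 2*k (note nobs is accepted for signature parity but does not enter the formula):

assert aic(llf=-100.0, nobs=50, df_modelwc=3) == 206.0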
def packpeeklist1(n1, n2, n3, n4, n5):
    """
    Packs and returns 5 item list
    """
    listp = [n1, n2, n3, n4, n5]
    return listp
4b781ff3e8eb4a1bd51f8e834fab5462371a85c5
707,041
def gen_gap(Pn, T, Q):
    """Runs the generalization gap test.

    This test simply checks the difference between the likelihood assigned
    to the training set versus that assigned to a held out test set.

    Inputs:
        Pn: (n X d) np array containing the held out test sample of
            dimension d
        T: (l X d) np array containing the training sample of dimension d
        Q: trained model of type sklearn.neighbors.KernelDensity

    Outputs:
        log_lik_gap: scalar representing the difference of the log
            likelihoods of Pn and T
    """
    return Q.score(T) - Q.score(Pn)
d57d16c06d05cea86e6f6ea89484574f20500170
707,043
def get_unstaged_files(gitobj):
    """
    ref:
    http://gitpython.readthedocs.io/en/stable/tutorial.html#obtaining-diff-information
    """
    diff = []
    diff.extend(gitobj.index.diff(gitobj.head.commit))
    diff.extend(gitobj.index.diff(None))
    return {"changed": diff, "untracked": gitobj.untracked_files}
623a2706bb0d2c428df0f44fe10a473e7d740938
707,044
import os

def save_tiles(tiles, prefix='', directory=os.getcwd(), format='png'):
    """
    Write image files to disk. Create specified folder(s) if they don't
    exist. Return list of :class:`Tile` instance.

    Args:
        tiles (list): List, tuple or set of :class:`Tile` objects to save.
        prefix (str): Filename prefix of saved tiles.

    Kwargs:
        directory (str): Directory to save tiles. Created if non-existent.

    Returns:
        Tuple of :class:`Tile` instances.
    """
    # Causes problems in CLI script.
    # if not os.path.exists(directory):
    #     os.makedirs(directory)
    for tile in tiles:
        tile.save(
            filename=tile.generate_filename(prefix=prefix,
                                            directory=directory,
                                            format=format),
            format=format)
    return tuple(tiles)
2848eee201d16ca15eed06019199d95a59393a37
707,045
from pathlib import Path
from typing import Counter, Union

def first(filename: Union[str, Path]) -> int:
    """
    Sort the input, prepend with 0 and append with 3 + the max.

    Return:
        (# of successive differences == 1) * (# of successive differences == 3)
    """
    with open(filename, "rt") as infile:
        jolts = sorted(int(line.strip()) for line in infile)
    jolts = [0] + jolts + [jolts[-1] + 3]
    diffs = Counter(right - left
                    for left, right in zip(jolts[:-1], jolts[1:]))
    return diffs[3] * diffs[1]
18ffe3e97d7256ea61fcf6e436d36bb360d0a285
707,046
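A tiny check of first() against the familiar Advent of Code 2020 day 10 sample, written to a temporary file since the function takes a filename:

import tempfile

sample = [16, 10, 15, 5, 1, 11, 7, 19, 6, 12, 4]
with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False) as f:
    f.write("\n".join(map(str, sample)))
assert first(f.name) == 35   # 7 one-jolt gaps * 5 three-jolt gaps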
def create_slice_obj(start, end, step):
    """Create slice object"""
    return slice(start, end, step)
88a5c5a9e0d3b714b4316d8744fcdd1a34f347a7
707,047
def scalar(typename):
    """
    Returns scalar type from ROS message data type, like "uint8" from
    "uint8[100]". Returns type unchanged if already a scalar.
    """
    return typename[:typename.index("[")] if "[" in typename else typename
729fb68bced11e190b3d32d03bbadd921f191bee
707,048
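For example:

assert scalar("uint8[100]") == "uint8"
assert scalar("uint8") == "uint8"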
def get_version():
    """
    Returns the PMML version.

    Returns
    -------
    version : String
        Returns the version of the pmml.
    """
    version = '4.4'
    return version
162f6e0ffb4c4741fafe2aa16d6fceed16bae99a
707,049
from typing import Dict, Type

def remap_shared_output_descriptions(output_descriptions: Dict[str, str],
                                     outputs: Dict[str, Type]) -> Dict[str, str]:
    """
    Deals with mixed styles of return value descriptions used in docstrings.
    If the docstring contains a single entry of return value description,
    that output description is shared by each output variable.

    :param output_descriptions: Dict of output variable names mapping to
        output description
    :param outputs: Interface outputs
    :return: Dict of output variable names mapping to shared output
        description
    """
    # no need to remap
    if len(output_descriptions) != 1:
        return output_descriptions
    _, shared_description = next(iter(output_descriptions.items()))
    return {k: shared_description for k, _ in outputs.items()}
06d589016a747230f88aa3507bd751fd30095222
707,050
def fitarg_rename(fitarg, ren):
    """Rename variable names in ``fitarg`` with rename function.

    ::

        # simple renaming
        fitarg_rename({'x': 1, 'limit_x': 1, 'fix_x': 1, 'error_x': 1},
                      lambda pname: 'y' if pname == 'x' else pname)
        # {'y': 1, 'limit_y': 1, 'fix_y': 1, 'error_y': 1}

        # prefixing
        fitarg_rename({'x': 1, 'limit_x': 1, 'fix_x': 1, 'error_x': 1},
                      lambda pname: 'prefix_' + pname)
        # {'prefix_x': 1, 'limit_prefix_x': 1, 'fix_prefix_x': 1,
        #  'error_prefix_x': 1}
    """
    tmp = ren
    if isinstance(ren, str):
        ren = lambda x: tmp + '_' + x
    ret = {}
    prefix = ['limit_', 'fix_', 'error_']
    for k, v in fitarg.items():
        vn = k
        pf = ''
        for p in prefix:
            if k.startswith(p):
                vn = k[len(p):]
                pf = p
        newvn = pf + ren(vn)
        ret[newvn] = v
    return ret
151233d0f18eaea564afbc6d600d576407504b35
707,051
def _get_matching_stream(smap, itag):
    """
    Return the url and signature for a stream matching itag in smap.
    """
    for x in smap:
        if x['itag'] == itag and x.get("s"):
            return x['url'], x['s']
    raise IOError("Sorry this video is not currently supported by pafy")
dc83fd3207d5ab4e1c85eb719f5f7d023131565e
707,053
import functools

def Debounce(threshold=100):
    """
    Simple debouncing decorator for apigpio callbacks.

    Example:
    `@Debounce()
    def my_cb(gpio, level, tick):
        print('gpio cb: {} {} {}'.format(gpio, level, tick))
    `

    The threshold can be given to the decorator as an argument (in
    millisec). This decorator can be used both on functions and object
    methods.

    Warning: as the debouncer uses the tick from pigpio, which wraps
    around after approximately 1 hour 12 minutes, you could theoretically
    miss one call if your callback is called twice with that interval.
    """
    threshold *= 1000
    max_tick = 0xFFFFFFFF

    class _decorated(object):

        def __init__(self, pigpio_cb):
            self._fn = pigpio_cb
            self.last = 0
            self.is_method = False

        def __call__(self, *args, **kwargs):
            if self.is_method:
                tick = args[3]
            else:
                tick = args[2]
            if self.last > tick:
                delay = max_tick - self.last + tick
            else:
                delay = tick - self.last
            if delay > threshold:
                self._fn(*args, **kwargs)
                print('call passed by debouncer {} {} {}'
                      .format(tick, self.last, threshold))
                self.last = tick
            else:
                print('call filtered out by debouncer {} {} {}'
                      .format(tick, self.last, threshold))

        def __get__(self, instance, type=None):
            # __get__ is called when an instance of `_decorated` is used
            # as a class attribute, which is the case when decorating a
            # method in a class
            self.is_method = True
            return functools.partial(self, instance)

    return _decorated
156b128ffaa579ead371bff3c4b4f20a2a05646b
707,054
import argparse

def key_value_data(string):
    """Validate the string to be in the form key=value."""
    if string:
        key, value = string.split("=")
        if not (key and value):
            msg = "{} not in 'key=value' format.".format(string)
            raise argparse.ArgumentTypeError(msg)
        return {key: value}
    return {}
c7d374f1d45fb49d8629a9956948603a82802f5f
707,055
def count_digit(n, digit):
    """Return how many times digit appears in n.

    >>> count_digit(55055, 5)
    4
    """
    if n == 0:
        return 0
    else:
        if n % 10 == digit:
            return count_digit(n // 10, digit) + 1
        else:
            return count_digit(n // 10, digit)
29cf3db8cca85e14b3b537f96246803d8176441d
707,056
def linear_search(alist, key):
    """Return index of key in alist. Return -1 if key not present."""
    for i in range(len(alist)):
        if alist[i] == key:
            return i
    return -1
ab4c0517f9103a43509b0ba511c75fe03ea6e043
707,058
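A quick check of both branches of linear_search:

assert linear_search([4, 2, 7, 1], 7) == 2
assert linear_search([4, 2, 7, 1], 9) == -1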
import math

def AGI(ymod1, c02500, c02900, XTOT, MARS, sep, DSI, exact, nu18,
        taxable_ubi, II_em, II_em_ps, II_prt, II_no_em_nu18,
        c00100, pre_c04600, c04600):
    """
    Computes Adjusted Gross Income (AGI), c00100, and
    compute personal exemption amount, c04600.
    """
    # calculate AGI assuming no foreign earned income exclusion
    c00100 = ymod1 + c02500 - c02900 + taxable_ubi
    # calculate personal exemption amount
    if II_no_em_nu18:  # repeal of personal exemptions for deps. under 18
        pre_c04600 = max(0, XTOT - nu18) * II_em
    else:
        pre_c04600 = XTOT * II_em
    if DSI:
        pre_c04600 = 0.
    # phase-out personal exemption amount
    if exact == 1:  # exact calculation as on tax forms
        line5 = max(0., c00100 - II_em_ps[MARS - 1])
        line6 = math.ceil(line5 / (2500. / sep))
        line7 = II_prt * line6
        c04600 = max(0., pre_c04600 * (1. - line7))
    else:  # smoothed calculation needed for sensible mtr calculation
        dispc_numer = II_prt * (c00100 - II_em_ps[MARS - 1])
        dispc_denom = 2500. / sep
        dispc = min(1., max(0., dispc_numer / dispc_denom))
        c04600 = pre_c04600 * (1. - dispc)
    return (c00100, pre_c04600, c04600)
aed1c311bc6b46b46bfea3e9756cd73933c37ca9
707,059
def extract_sigma_var_names(filename_nam):
    """
    Parses a 'sigma.nam' file containing the variable names, and outputs a
    list of these names. Some vector components contain a semicolon in
    their name; if so, break the name at the semicolon and keep just the
    1st part.
    """
    var_names = []
    with open(filename_nam, 'r') as file:
        for line in file:
            var_name = line.strip()
            # check for semicolon
            if ';' in var_name:
                var_name = var_name.split(';')[0]
            var_names.append(var_name)
    return var_names
930e855d47c4303cac28e9973982392489fb577d
707,060
from typing import Any

def all_tasks_stopped(tasks_state: Any) -> bool:
    """
    Checks if all tasks are stopped or if any are still running.

    Parameters
    ----------
    tasks_state: Any
        Task state dictionary object

    Returns
    -------
    response: bool
        True if all tasks are stopped.
    """
    for t in tasks_state["tasks"]:
        if t["lastStatus"] in ("PENDING", "RUNNING"):
            return False
    return True
98edffe71052cc114a7dda37a17b3a346ef59ef8
707,062
def dataclass_fields(dc):
    """Returns a dataclass's fields dictionary."""
    return {name: getattr(dc, name) for name in dc.__dataclass_fields__}
4b82af3bfbc02f7bbfcf1aecb6f6501ef10d86e1
707,063
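A short usage sketch of dataclass_fields with a throwaway dataclass:

from dataclasses import dataclass

@dataclass
class Point:
    x: int
    y: int

assert dataclass_fields(Point(1, 2)) == {"x": 1, "y": 2}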
import os
import subprocess

def gets_ontology_statistics(file_location: str,
                             owltools_location: str = './pkt_kg/libs/owltools') -> str:
    """Uses the OWL Tools API to generate summary statistics (i.e. counts
    of axioms, classes, object properties, and individuals).

    Args:
        file_location: A string that contains the file path and name of an
            ontology.
        owltools_location: A string pointing to the location of the owl
            tools library.

    Returns:
        stats: A formatted string containing descriptive statistics.

    Raises:
        TypeError: If the file_location is not type str.
        OSError: If file_location points to a non-existent file.
        ValueError: If file_location points to an empty file.
    """
    if not isinstance(file_location, str):
        raise TypeError('file_location must be a string')
    elif not os.path.exists(file_location):
        raise OSError('{} does not exist!'.format(file_location))
    elif os.stat(file_location).st_size == 0:
        raise ValueError('{} is empty'.format(file_location))
    else:
        output = subprocess.check_output([os.path.abspath(owltools_location),
                                          file_location, '--info'])
    res = output.decode('utf-8').split('\n')[-5:]
    cls, axs, op, ind = (res[0].split(':')[-1], res[3].split(':')[-1],
                         res[2].split(':')[-1], res[1].split(':')[-1])
    sent = ('The knowledge graph contains {0} classes, {1} axioms, '
            '{2} object properties, and {3} individuals')
    stats = sent.format(cls, axs, op, ind)
    return stats
d3293b00a49668a48a788d00a09efe603a6d7aee
707,064
def _jupyter_server_extension_paths():
    """
    Set up the server extension for collecting metrics
    """
    return [{"module": "jupyter_resource_usage"}]
f59c343dd8bcdb4755c725107b3c83f12978e9ef
707,065
import os

def walk(dirname, file_list):
    """
    This function is from a book called Think Python written by Allen B.
    Downey. It walks through a directory, gets the names of all files and
    calls itself recursively on all the directories
    """
    for name in os.listdir(dirname):
        path = os.path.join(dirname, name)
        if os.path.isfile(path):
            file_list.append(path)
        else:
            walk(path, file_list)
    return file_list
fab10858f2887e30aac9e12a9a47b6d88a778a60
707,066
import os
import argparse

def parser():
    """Parses arguments from command line using argparse."""
    # default directory for reddit files
    default_directory = os.path.join(os.getcwd(), "data")

    parser = argparse.ArgumentParser()

    # obligatory
    parser.add_argument("mode", type=int,
                        help="execution mode: 1 build index, 2: query "
                             "using existing index, 3 build and query")

    # conditionally obligatory
    parser.add_argument("--start", "-s", type=str, help="first year/month")
    parser.add_argument("--end", "-e", type=str, help="last year/month")

    # optional with defaults
    parser.add_argument("--dir", "-d", type=str, nargs="?",
                        default=default_directory,
                        help="directory for data storage")
    parser.add_argument("--num", "-n", type=int, nargs="?", default=10,
                        help="number of results per query")
    parser.add_argument("--fulltext", "-f", action="store_true",
                        help="store fulltext and/or return in queries")
    parser.add_argument("--all", "-a", action="store_true",
                        help="Return documents containing all rather than "
                             "any of the query terms")
    parser.add_argument("--minfreq", "-m", type=int, nargs="?", default=5,
                        help="minimum term frequency")
    parser.add_argument("--lemma", "-l", action="store_true",
                        help="lemmatize comments/queries")
    parser.add_argument("--cores", "-c", type=int, nargs="?", default=1,
                        help="number of cores to use")
    return parser
271f4a5db0a5e8f6b201c098830885e768d246b7
707,067
def _synced(method, self, args, kwargs):
    """Underlying synchronized wrapper."""
    with self._lock:
        return method(*args, **kwargs)
54ca3cf69742550bd34ff3d2299a2d84f78577a3
707,069
import os

def reponame(url, name=None):
    """
    Determine a repo's cloned name from its URL.
    """
    if name is not None:
        return name
    name = os.path.basename(url)
    if name.endswith('.git'):
        name = name[:-4]
    return name
be8bb47f1fc8be940e469d6a999a2039edb2fa3a
707,072
import argparse
from typing import Dict

def specified_options(opts: argparse.Namespace, exclude=None) -> Dict:
    """
    Cast an argparse Namespace into a dictionary of options.

    Remove all options that were not specified (equal to None).

    Arguments:
        opts: The namespace to cast.
        exclude: Names of options to exclude from the result.

    Returns:
        A dictionary of specified-only options.
    """
    exclude = exclude or set()
    options = opts.__dict__.items()  # noqa: WPS609
    return {opt: value for opt, value in options
            if value is not None and opt not in exclude}
b1200fbedb5edcd8b44fef3e30f644cc582ca23f
707,073
import torch

def pytorch_argmax(op):
    """Implementation of argmax for pytorch."""
    def _impl(x, dim):
        dim = tuple(sorted(dim))
        # move the reduced dimensions to the end
        n = ()
        for _s in range(len(x.shape)):
            if _s not in dim:
                n = n + (_s,)
        n = n + dim
        x = x.permute(n)
        # flatten the reduced dimensions and take the argmax over them
        ns = x.shape[0:-len(dim)] + (-1,)
        r = torch.argmax(x.reshape(ns), -1, keepdim=False)
        # reinsert the reduced dimensions with size 1
        rl = list(r.shape)
        for _sd in dim:
            rl.insert(_sd, 1)
        rf = tuple(rl)
        return (torch.reshape(r, rf),)
    return _impl, op.inputs[1:]
cc466b41c0dd4bb9730dcdf50816b9d0cf66cfaa
707,074
def loadRowCluster(ndPage, algo):
    """load the Cluster nodes of a page whose @algo attribute matches algo"""
    xpCluster = f".//Cluster[@algo='{algo}']"
    lClusters = ndPage.xpath(xpCluster)
    return lClusters
dcb75214e58d6656f58bee78b904562c05fd36d8
707,075
def audit_umbrelladns(networks_fwrules):
    """Accepts a list of firewall rules for a client.
    Checks for rules to allow DNS lookups to Umbrella and deny all other
    DNS lookups. Returns a list of clients and a boolean of whether
    Umbrella DNS is configured properly"""
    umbrelladns_audit = []
    host1 = '208.67.222.222/32'
    host2 = '208.67.220.220/32'
    for customer in networks_fwrules:
        customer_result = {
            'organizationId': customer['organizationId'],
            'organizationName': customer['organizationName']
        }
        for network in customer['networks']:
            # plain booleans here; the original used the strings 'True' /
            # 'False' and compared them with `is`, which only worked by
            # accident of string interning
            umbrella_allow, dns_deny = False, False
            if 'l3FirewallRules' in network:
                for rule in network['l3FirewallRules']:
                    destcidr = rule['destCidr'].split(",")
                    if rule['policy'] == 'allow' \
                            and rule['protocol'] in ('tcp', 'udp') \
                            and rule['destPort'] == '53' \
                            and (host1 in destcidr and host2 in destcidr):
                        umbrella_allow = True
                    if rule['policy'] == 'deny' \
                            and rule['protocol'] in ('tcp', 'udp') \
                            and rule['destPort'] == '53' \
                            and rule['destCidr'] == 'Any':
                        dns_deny = True
            if umbrella_allow and dns_deny:
                customer_result['umbrellaDns'] = 'True'
            else:
                customer_result['umbrellaDns'] = 'False'
            umbrelladns_audit.append(customer_result)
    return umbrelladns_audit
26c01011dee998ba398db03603c61c00845055ea
707,076
import inspect

def get_all_methods(klass):
    """Get all method members (regular, static, class method)."""
    if not inspect.isclass(klass):
        raise ValueError
    pairs = list()
    for attr, value in inspect.getmembers(klass,
                                          lambda x: inspect.isroutine(x)):
        if not (attr.startswith("__") or attr.endswith("__")):
            pairs.append((attr, value))
    return pairs
ada4f47c750455ddd1300f26eb3e296b046acefe
707,077
import pathlib

def _suffix_directory(key: pathlib.Path):
    """Converts '/folder/.../folder/folder/folder' into 'folder/folder'"""
    key = pathlib.Path(key)
    shapenet_folder = key.parent.parent
    key = key.relative_to(shapenet_folder)
    return key
147539065c3d21ee351b23f2d563c662fe55f04a
707,078
def atomic(fn, self, *args, **kwargs):
    """
    Atomic method.
    """
    return self._atom(fn, args, kwargs)
96fdd8451bb534deefb2ffbe101526838d75fa6e
707,079
def text_to_string(filename, useEncoding):
    """Read a text file and return a string."""
    with open(filename, encoding=useEncoding, errors='ignore') as infile:
        return infile.read()
f879bb747699496204820b74944fd563658a7117
707,080
def iscomment(s):
    """
    Define what we call a comment in MontePython chain files
    """
    return s.startswith('#')
ab3a9d240e423c562c9e83cdd9599fddf144b7c3
707,081
def fix_bayes_factor(bayes_factor):
    """
    If one of the bayes factors is 'inf' we get a string instead of a
    tuple back. This is hacky but fixes that.
    """
    # Maximum cut off for Bayes factor value
    max_bf = 1e12
    if type(bayes_factor) == str:
        bayes_factor = bayes_factor.split(",")
        bayes_factor = [min(float(x), max_bf) for x in bayes_factor]
        bayes_factor = tuple(bayes_factor)
    bayes_factor = bayes_factor[0]
    return bayes_factor
7e7912ea9b0c90f0945f486aa397a2df2d13d5cc
707,082
import os

def absolute_path_without_git(directory):
    """ return the absolute path of local git repo """
    return os.path.abspath(directory + "/..")
1547bdebd8e6375f6725dbf36ae99a93eec5053b
707,083
import re
import json

def parse_to_json(data_str):
    """ Convert string to a valid json object """
    json_obj_list = []
    obj = data_str.split('%')
    for record in obj:
        attributes = re.split(',', record)
        data = json.dumps(attributes)
        data = re.sub(r':', '":"', data)
        data = re.sub(r'\[', '{', data)
        data = re.sub(r']', '}', data)
        json_obj_list.append(data)
    return json_obj_list
288911694548fd603a3a261ac9c51c5c971599e0
707,084
import os
import re

def get_version():
    """Get project version"""
    version_file_path = os.path.join(
        os.path.dirname(__file__), 'spowtd', 'VERSION.txt')
    with open(version_file_path) as version_file:
        version_string = version_file.read().strip()
    version_string_re = re.compile('[0-9.]+')
    match = version_string_re.match(version_string)
    if match is None:
        raise ValueError(
            'version string "{}" does not match regexp "{}"'
            .format(version_string, version_string_re.pattern))
    return match.group(0)
4a11ae4efd6269b29acdef0835461ede33cf6e5c
707,085
import logging

def logger(module_name: str):
    """Initialize and configure the logger"""
    logging.basicConfig(
        level=logging.INFO,
        format='[%(levelname)s][%(asctime)s] %(name)s: %(message)s'
    )
    return logging.getLogger(module_name)
0a436b50d16c752404d31e3f34b38239391236d5
707,086
def apply_tariff(kwh, hour):
    """Calculates cost of electricity for given hour."""
    if 0 <= hour < 7:
        rate = 12
    elif 7 <= hour < 17:
        rate = 20
    elif 17 <= hour < 24:
        rate = 28
    else:
        raise ValueError(f'Invalid hour: {hour}')
    return rate * kwh
fb2c5b458c13456a39612720b6e80e0cd707391e
707,087
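For example, two kWh of apply_tariff at the daytime and evening rates:

assert apply_tariff(kwh=2, hour=8) == 40    # 7-17h rate of 20
assert apply_tariff(kwh=2, hour=23) == 56   # 17-24h rate of 28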
from typing import BinaryIO

def tail(the_file: BinaryIO, lines_2find: int = 20) -> list[bytes]:
    """
    From
    http://stackoverflow.com/questions/136168/get-last-n-lines-of-a-file-with-python-similar-to-tail

    We read at least 21 line breaks from the bottom, block by block for
    speed; 21 to ensure we don't get a half line.
    """
    lines_found: int = 0
    total_bytes_scanned: int = 0
    the_file.seek(0, 2)
    bytes_in_file: int = the_file.tell()
    while lines_2find + 1 > lines_found and bytes_in_file > total_bytes_scanned:
        byte_block: int = min(1024, bytes_in_file - total_bytes_scanned)
        the_file.seek(-(byte_block + total_bytes_scanned), 2)
        total_bytes_scanned += byte_block
        # read exactly the current block; the original read a fixed 1024
        # bytes here, which double-counts newlines near the start of the file
        lines_found += the_file.read(byte_block).count(b"\n")
    the_file.seek(-total_bytes_scanned, 2)
    line_list: list[bytes] = list(the_file.readlines())
    return line_list[-lines_2find:]
094917839d4b26e284244715452982eaf6e8c08a
707,088
def CommaSeparatedFloats(sFloatsCSV):
    """Read comma-separated floats from string.

    [sFloatsCSV]: string, contains comma-separated floats.
    <retval>: list, floats parsed from string.
    """
    return [float(sFloat) for sFloat in sFloatsCSV.replace(" ", "").split(",")]
1aa12ca7297aa3bd809f6d2ffaf155233a826b49
707,089
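For instance (CommaSeparatedFloats strips spaces before splitting):

assert CommaSeparatedFloats("1.5, 2, -3.25") == [1.5, 2.0, -3.25]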
import os

def get_filenames(is_training, data_dir, num_files=1014):
    """Return filenames for dataset."""
    if is_training:
        return [
            os.path.join(data_dir, "train-%05d-of-01014" % i)
            for i in range(num_files)
        ]
    else:
        return [
            os.path.join(data_dir, "validation-%05d-of-00128" % i)
            for i in range(128)
        ]
4381513fce78d7d491866db4f67a57496530d67c
707,090
import ast

def bit_xor(*arguments):
    """
    Bitwise XOR function.
    """
    return ast.BitXor(*arguments)
07af3232a18796b4122e3ac6a4279ec00032c31d
707,091
def topological_sort(g):
    """
    Returns a list of vertices in directed acyclic graph g in topological
    order.
    """
    ready = []
    topo = []
    in_count = {}
    for v in g.vertices():
        in_count[v] = g.degree(v, outgoing=False)
        if in_count[v] == 0:
            # v has no constraints, i.e no incoming edges
            ready.append(v)
    while len(ready) > 0:
        u = ready.pop()
        topo.append(u)
        for e in g.incident_edges(u):
            v = e.opposite(u)
            in_count[v] -= 1  # v now no longer has u as a constraint
            if in_count[v] == 0:
                ready.append(v)
    return topo
5ac6261bf1b6fa92280abdc3fc95679ad9294e80
707,092
def _perform_sanity_checks(config, extra_metadata):
    """
    Method to perform sanity checks on current classification run.

    :param config: dirbs config instance
    :param extra_metadata: job extra metadata dict obj
    :return: bool (true/false)
    """
    curr_conditions = [c.as_dict() for c in config.conditions]
    curr_operators = [op.as_dict() for op in config.region_config.operators]
    curr_amnesty = config.amnesty_config.as_dict()

    if curr_conditions == extra_metadata['conditions'] and \
            curr_operators == extra_metadata['operators'] and \
            curr_amnesty == extra_metadata['amnesty']:
        return True
    return False
fa5fa39bae91393c4f91ab6aa3b595f8a0db2e4f
707,093
def static_message_fixture(tmpdir_factory, prefix, message, suffix):
    """A fixture which provides a static message."""
    filename = tmpdir_factory.mktemp('data').join('static_message.txt').strpath
    file_contents = "{0}{1}{2}".format(prefix, message, suffix)
    with open(filename, 'w') as f:
        f.write(file_contents)
    return filename
a9a11508eb10760452cad557e792df30b068e8bc
707,094
from typing import List, Tuple

def getElementByClass(className: str, fileName: str) -> List[Tuple[int, str]]:
    """Returns the first matching tag from an HTML/XML document"""
    nonN: List[str] = []
    with open(fileName, "r+") as f:
        html: List[str] = f.readlines()
    for line in html:
        nonN.append(line.replace("\n", ""))
    pattern: str = f'class="{className}"'
    patternAlt: str = f"class='{className}'"
    matches: List[Tuple[int, str]] = []
    for line in nonN:
        if pattern in line or patternAlt in line:
            lineNo = nonN.index(line) + 1
            matches.append((int(lineNo), line))
            break
    return matches
969e4070e16dec2e10e26e97cbaaab9d95e7b904
707,095
def exec_quiet(handle, *args, **kwargs):
    """
    Like exe.execute but doesn't print the exception.
    """
    try:
        val = handle(*args, **kwargs)
    except Exception:
        pass
    else:
        return val
d0e922672c8a2d302bc2bfcb30bec91d32988945
707,097
def _remarks(item: str) -> str:
    """Returns the remarks. Reserved for later parsing"""
    return item
d515837e52ee88edeb5bdb5e8f2d37ed28789362
707,099
import math

def func(x):
    """ sinc (x) """
    if x == 0:
        return 1.0
    return math.sin(x) / x
c91242e360547107f7767e442f40f4bf3f2b53e8
707,100
def grad_norm(model=None, parameters=None):
    """Compute parameter gradient norm."""
    assert parameters is not None or model is not None
    total_norm = 0
    if parameters is None:
        parameters = []
        if model is not None:
            parameters.extend(model.parameters())
    parameters = [p for p in parameters
                  if p.grad is not None and p.requires_grad]
    for p in parameters:
        param_norm = p.grad.detach().data.norm(2)
        total_norm += param_norm.item() ** 2
    total_norm = total_norm ** 0.5
    return total_norm
ff471715a72f0d2afbafa60d19eb802a748a2419
707,101
import copy

def db_entry_trim_empty_fields(entry):
    """ Remove empty fields from an internal-format entry dict """
    entry_trim = copy.deepcopy(entry)  # Make a copy to modify as needed
    for field in ['url', 'title', 'extended']:
        if field in entry:
            if (entry[field] is None) or \
                    (type(entry[field]) is str and len(entry[field]) == 0):
                del entry_trim[field]
    return entry_trim
d5b31c823f4e8091872f64445ab603bcbf6a2bef
707,102
import os

def solrctl():
    """ solrctl path """
    for dirname in os.environ.get('PATH', '').split(os.path.pathsep):
        path = os.path.join(dirname, 'solrctl')
        if os.path.exists(path):
            return path
    return None
dd34e187d2ae27514c2290bda69cfd1f538c76ea
707,103
def _is_install_requirement(requirement):
    """
    return True iff setup should install requirement

    :param requirement: (str) line of requirements.txt file
    :return: (bool)
    """
    return not (requirement.startswith('-e') or 'git+' in requirement)
339f6a8a573f33157a46193216e90d62475d2dea
707,104
def move_to_next_pixel(fdr, row, col):
    """
    Given fdr (flow direction array), row (current row index), col
    (current col index), return the next downstream neighbor as a
    (row, col) pair.

    See How Flow Direction works:
    http://desktop.arcgis.com/en/arcmap/latest/tools/spatial-analyst-toolbox/how-flow-direction-works.htm

    D8 flow direction grid:

        | 32 | 64 | 128 |
        | 16 |  X |   1 |
        |  8 |  4 |   2 |
    """
    # get the fdr pixel value (x, y)
    value = fdr[row, col]
    # Update the row, col based on the flow direction
    if value == 1:
        col += 1
    elif value == 2:
        col += 1
        row += 1
    elif value == 4:
        row += 1
    elif value == 8:
        row += 1
        col -= 1
    elif value == 16:
        col -= 1
    elif value == 32:
        row -= 1
        col -= 1
    elif value == 64:
        row -= 1
    elif value == 128:
        row -= 1
        col += 1
    else:
        # Indeterminate flow direction (sink). Do not move.
        pass
    return (row, col)
d134bb35ed4962945c86c0ac2c6af1aff5acd06b
707,105
import math as m
from math import sin, cos, atan, asin, floor

def equ2gal(ra, dec):
    """Converts Equatorial J2000d coordinates to the Galactic frame.

    Note: it is better to use AstroPy's SkyCoord API for this.

    Parameters
    ----------
    ra, dec : float, float [degrees]
        Input J2000 coordinates (Right Ascension and Declination).

    Returns
    -------
    glon, glat : float, float [degrees]
    """
    OB = m.radians(23.4333334)
    dec = m.radians(dec)
    ra = m.radians(ra)
    # note: the original comments on `a` and `d` were swapped; the North
    # Galactic Pole is at RA ~192.86 deg, Dec ~+27.13 deg
    a = 27.128251   # The declination of the North Galactic Pole
    d = 192.859481  # The RA of the North Galactic Pole
    l = 32.931918   # The ascending node of the Galactic plane on the equator
    sdec = sin(dec)
    cdec = cos(dec)
    sa = sin(m.radians(a))
    ca = cos(m.radians(a))

    GT = asin(cdec * ca * cos(ra - m.radians(d)) + sdec * sa)
    GL = m.degrees(atan((sdec - sin(GT) * sa) /
                        (cdec * sin(ra - m.radians(d)) * ca)))
    TP = sdec - sin(GT) * sa
    BT = cdec * sin(ra - m.radians(d)) * ca
    if BT < 0:
        GL += 180
    else:
        if TP < 0:
            GL += 360
    GL += l
    if GL > 360:
        GL -= 360

    LG = floor(GL)
    LM = floor((GL - floor(GL)) * 60)
    LS = ((GL - floor(GL)) * 60 - LM) * 60
    GT = m.degrees(GT)

    D = abs(GT)
    if GT > 0:
        BG = floor(D)
    else:
        BG = -1 * floor(D)
    BM = floor((D - floor(D)) * 60)
    BS = ((D - floor(D)) * 60 - BM) * 60
    if GT < 0:
        BM = -BM
        BS = -BS

    # if GL > 180:
    #     GL -= 360
    return (GL, GT)
ebed665e798a00b649367bc389747f046659d9af
707,106
from typing import Union

def addition(a: Union[int, float], b: Union[int, float]) -> Union[int, float]:
    """
    A simple addition function. Add `a` to `b`.
    """
    calc = a + b
    return calc
b9adaf3bea178e23bd4c02bdda3f286b6ca8f3ab
707,107
from functools import reduce
import random
import hashlib

def get_hash(dictionary):
    """Takes a dictionary as input and provides a unique hash value based
    on the values in the dictionary.

    All the values in the dictionary after conversion to string are
    concatenated and then the HEX hash is generated

    :param dictionary: A Python dictionary
    :return: A HEX hash

    Credit: https://gitlab.com/calledbymountains/cvdatasetmanagement/blob/master/utils/gen_utils.py
    """
    if not isinstance(dictionary, dict):
        raise ValueError('The argument must be a Python dictionary.')
    str_input = reduce(lambda x, y: str(x) + str(y),
                       list(dictionary.values()))
    str_input = ''.join(random.sample(str_input, len(str_input)))
    hash_object = hashlib.shake_128(str_input.encode())
    output = hash_object.hexdigest(12)
    return output
2e69c397611510151996d152c4fc0b5573d62fdc
707,108
def aggregate_extrema(features, Th, percentage=True):
    """
    Summary:
        Function that tries to remove false minima by aggregating close-by
        extrema

    Arguments:
        features - pandas series containing the extrema to be aggregated.
                   The series is of the form: Max, Min, Max, Max, Min, ...
        Th - threshold used to remove 'false' minima
        percentage - tells if the threshold is expressed as percentage of
                     the distance between adjacent maxima and minima

    Returns:
        aggregatedFeat - pandas vector with aggregated features
    """
    # Keep the first maximum and minimum
    ind = [0]
    # Factor used to scale the threshold depending on 'percentage'
    d = 1
    skipNext = False
    # For each minimum check if it can be merged with the right node
    for ii in range(1, len(features), 3):
        if skipNext:
            skipNext = False
            continue
        # check if we are at the end of the feature vector
        if ii + 2 >= len(features):
            # Record the point, which is the last in the list
            ind.append(ii)      # Current minima
            ind.append(ii + 1)  # Following maxima
            break
        aggregate = False
        # check if the next two maxima coincide
        if features[ii + 1] == features[ii + 2]:
            # find what is the lowest minimum
            if features[ii] > features[ii + 3]:
                # try to aggregate on the left
                if percentage:
                    d = features[ii - 1] - features[ii + 3]
                if (features[ii - 1] > features[ii + 1]) and \
                        (features[ii + 1] - features[ii] < Th * d):
                    aggregate = True
                    # in this case, the point and the next 2 coincident
                    # maxima should not be included in the output list
            else:
                # try to aggregate on the right
                if percentage:
                    d = features[ii + 4] - features[ii]
                if (features[ii + 4] > features[ii + 2]) and \
                        (features[ii + 2] - features[ii + 3] < Th * d):
                    aggregate = True
                    # in this case, the point should be included but the
                    # next should not
                    ind.append(ii)  # Current minima
                    ind.append(ii + 4)
                    if ii + 5 < len(features):
                        ind.append(ii + 5)
                    # skip the next minima that has already been processed
                    skipNext = True
        if not aggregate:
            # Record the point
            ind.append(ii)      # Current minima
            ind.append(ii + 1)  # Following maxima
            ind.append(ii + 2)  # Maxima of the next minima
    # check if the last max was copied twice
    if features[ind[-1]] == features[ind[-2]]:
        ind.pop()
    return features[ind].copy()
6eeed204de4c39f8b66353595cbc04800bb1b176
707,109
import contextlib
import sqlite3

def run_sql_command(query: str, database_file_path: str,
                    unique_items=False) -> list:
    """
    Returns the output of an SQL query performed on a specified SQLite
    database

    Parameters:
        query (str): An SQL query
        database_file_path (str): absolute path of the SQLite database file
        unique_items (bool): whether the function should return a list of
            items instead of a list of tuples with one value

    Returns:
        records (list): The output of the SQLite database
    """
    with contextlib.closing(sqlite3.connect(database_file_path)) as conn:
        with conn:
            with contextlib.closing(conn.cursor()) as cursor:  # auto-closes
                cursor.execute(query)
                records = cursor.fetchall()
                if unique_items:
                    return [x[0] for x in records]
                return records
705584db31fd270d4127e7d1b371a24a8a9dd22e
707,110
def compact_float(n, max_decimals=None):
    """Reduce a float to a more compact value.

    Args:
        n: Floating point number.
        max_decimals: Maximum decimals to keep; defaults to None.

    Returns:
        An integer if `n` is essentially an integer, or a string
        representation of `n` reduced to `max_decimals` numbers after the
        decimal point. Otherwise, simply returns `n`.
    """
    compact = n
    if float(n).is_integer():
        compact = int(n)
    elif max_decimals is not None:
        compact = "{0:.{1}f}".format(n, max_decimals)
    return compact
827e49e05aaca31d497f84c2a8c8dd52cfad73d9
707,112
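One example per branch of compact_float:

assert compact_float(3.0) == 3                           # integral float -> int
assert compact_float(3.14159, max_decimals=2) == "3.14"  # truncated string
assert compact_float(3.14159) == 3.14159                 # returned unchanged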