content
stringlengths
35
416k
sha1
stringlengths
40
40
id
int64
0
710k
def tobin(data, width):
    """Convert an integer to a bit list, MSB first.

    The value is masked to `width` bits, so negative numbers come out
    in two's-complement form.

    :param data: integer to convert
    :param width: number of bits in the result
    :return: list of ints (0/1) of length `width`
    """
    bit_string = bin(data & (2 ** width - 1))[2:].zfill(width)
    return [int(bit) for bit in bit_string]
1679bc6826cbfd226e99f33dbfd049a284c26a75
705,008
def open_file_externally(path: str) -> None:
    """open_file_externally(path: str) -> None

    (internal) Open the provided file in the default external app.
    Placeholder implementation: performs no action.
    """
    return None
8bb6f5c19ad89fbef59e2ecbec89d5e2b5d05783
705,010
def counter(): """Creates a counter instance""" x = [0] def c(): x[0] += 1 return x[0] return c
0f78a34b53bc5cc8b125a939cd88f58b047607a0
705,011
import re


def Element(node, tag, mandatory=False):
    """Return the whitespace-normalised text of `tag` under `node`.

    Raises SyntaxError when `mandatory` is True and the tag is absent;
    returns None when absent and optional.
    """
    text = node.findtext(tag)
    if text is None:
        if mandatory:
            raise SyntaxError("Element '{}.{}' is mandatory, but not present!".format(node.tag, tag))
        return None
    collapsed = re.sub(r'\s+', ' ', text)
    return collapsed.strip()
2173f1ff50f8c685496d9b2708b19f1d6d808fb5
705,012
import base64


def fix_string_attr(tfjs_node):
    """Normalise string attrs stored as int lists (older tfjs format) into
    base64 strings so protobuf can correctly decode the JSON.
    Mutates `tfjs_node` in place.
    """
    def to_b64(value):
        if not isinstance(value, list):
            return value
        return base64.encodebytes(bytes(value)).decode()

    if 'attr' not in tfjs_node:
        return
    for attr in tfjs_node['attr'].values():
        if 's' in attr:
            attr['s'] = to_b64(attr['s'])
        if 'list' in attr and 's' in attr['list']:
            entries = attr['list']['s']
            for pos, entry in enumerate(entries):
                entries[pos] = to_b64(entry)
c137144fd9a42134451d2c49c93b20d562f1188b
705,013
def maximumProduct(nums):
    """Maximum product of any three numbers in the list.

    :type nums: List[int]
    :rtype: int

    Either the two smallest (possibly negative) times the largest,
    or the three largest.
    """
    ordered = sorted(nums)
    low_pair = ordered[0] * ordered[1] * ordered[-1]
    top_three = ordered[-3] * ordered[-2] * ordered[-1]
    return max(low_pair, top_three)
2ebbc11893499d18fcbf7630fc815b07abf329fd
705,014
import os


def get_config_filepath():
    """Return the filepath of the zoia configuration file (XDG aware)."""
    fallback_root = os.path.join(os.path.expanduser('~'), '.config')
    config_root = os.getenv('XDG_CONFIG_HOME', default=fallback_root)
    return os.path.join(config_root, 'zoia/config.yaml')
53d78749adf56219ca08b3b4556901241608a57d
705,015
def friction_fnc(normal_force, friction_coefficient):
    """Force of friction: normal force times friction coefficient."""
    return friction_coefficient * normal_force
7c25e651d7ef8990eab049a5b356f5470496af8e
705,016
def last_char(text: str, begin: int, end: int, chars: str) -> int:
    """Return `end` shrunk past any trailing characters from `chars`.

    Scans backwards from `end` (exclusive) toward `begin` and returns the
    index just past the last character of ``text[begin:end]`` that is NOT
    in `chars`; never returns less than `begin`.

    (Docstring corrected: the original claimed "non-whitespace", but the
    set of trimmed characters is the `chars` argument.)
    """
    while end > begin and text[end - 1] in chars:
        end -= 1
    return end
5d59cd50fb99593d5261513327b9799fc175cd6c
705,017
import time


def next_tide_state(tide_info, current_time):
    """Compute next tide state.

    :param tide_info: object exposing give_next_tide_in_epoch(epoch) -> dict
    :param current_time: epoch seconds to look up from
    :return: "<type> tide at HH:MM" string, or None when the lookup
        reported an error.
    """
    next_tide = tide_info.give_next_tide_in_epoch(current_time)
    # Fixed: identity comparison with None (was `== None`).
    if next_tide.get("error") is not None:
        return None
    tidetime = time.strftime("%H:%M", time.localtime(next_tide.get("tide_time")))
    tidetype = next_tide.get("tide_type")
    return f"{tidetype} tide at {tidetime}"
cc4f78cf41aa76d3788b69daaf64f4711d68714f
705,019
def convert_idx(text, tokens):
    """
    Calculates the (start, end) span of each token within `text`.

    :param text: The text to extract spans from.
    :param tokens: The tokens of that text, in order.
    :return: A list of (start, end) tuples.
    :raises ValueError: if a token cannot be located in `text`.
        (Was a printed message plus a bare ``Exception``; ValueError is a
        subclass of Exception so existing handlers still work.)
    """
    current = 0
    spans = []
    for token in tokens:
        current = text.find(token, current)
        if current < 0:
            raise ValueError("Token {} cannot be found".format(token))
        spans.append((current, current + len(token)))
        current += len(token)
    return spans
6022dca6591ae4a9bea3902af09ff59fee7d5cd5
705,020
import os


def get_materials():
    """Return the materials dictionary read from 'materials.dat'.

    Each data line has the form ``name | formula | density``; comment
    lines start with '#'.  Returns {} when the file does not exist.

    Fixed: the file is now opened with a context manager so the handle
    is closed even if parsing raises.
    """
    mat = {}
    fname = 'materials.dat'
    if not os.path.exists(fname):
        return mat
    with open(fname, 'r') as fh:
        for line in fh:
            line = line.strip()
            if len(line) > 2 and not line.startswith('#'):
                name, formula, density = [part.strip() for part in line.split('|')]
                mat[name.lower()] = (formula.replace(' ', ''), float(density))
    return mat
b89b230954d5690314069810b4595a49557e6620
705,021
import subprocess
import os


def decoratebiom(biom_file, outdir, metadata, core=""):
    """Decorate a BIOM table with sample and feature metadata by shelling
    out to the external ``biom add-metadata`` tool.

    :param biom_file: path of the input ``.biom`` file
    :param outdir: directory containing the BiG-MAP coverage files
    :param metadata: sample-metadata file passed via ``-m``
    :param core: when "core", attach the core-coverage feature metadata
        and write ``<base>.metacore.biom``; otherwise attach the regular
        coverage file, overwriting ``<base>.meta.biom`` in place
    :return: path of the final annotated biom file
    """
    # Pass 1: add sample (column) metadata -> <base>.meta.biom
    out_biom = '.'.join(biom_file.split('.')[0:-1]) + '.meta.biom'
    cmd_sample = f"biom add-metadata -i {biom_file} -o {out_biom} -m {metadata} --output-as-json"
    # NOTE(review): shell=True with interpolated paths — confirm inputs are
    # trusted and contain no spaces/shell metacharacters.
    res_add = subprocess.check_output(cmd_sample, shell=True)
    if core == "core":
        # Pass 2 (core): core coverage as observation (row) metadata.
        in_biom = '.'.join(biom_file.split('.')[0:-1]) + '.meta.biom'
        out_biom = '.'.join(biom_file.split('.')[0:-1]) + '.metacore.biom'
        metadata_f = os.path.join(outdir, 'BiG-MAP.map.core.coverage.txt')
        cmd_feature = f"biom add-metadata --observation-metadata-fp {metadata_f} -i {in_biom} -o {out_biom} --output-as-json"
        res_feature = subprocess.check_output(cmd_feature, shell=True)
    else:
        # Pass 2 (default): regular coverage; in_biom == out_biom, so the
        # .meta.biom file is rewritten in place.
        in_biom = '.'.join(biom_file.split('.')[0:-1]) + '.meta.biom'
        out_biom = '.'.join(biom_file.split('.')[0:-1]) + '.meta.biom'
        metadata_f = os.path.join(outdir, 'BiG-MAP.map.coverage.txt')
        cmd_feature = f"biom add-metadata --observation-metadata-fp {metadata_f} -i {in_biom} -o {out_biom} --output-as-json"
        res_feature = subprocess.check_output(cmd_feature, shell=True)
    return (out_biom)
1c327110ba7b27d710dced5e3d59cfabf3f440fc
705,022
def get_arg_name(node):
    """Return the node's ``id`` attribute when set, otherwise its ``arg``.

    Args:
        node: an AST-like node exposing ``id`` and ``arg``.

    Returns:
        The preferred name for the node.
    """
    identifier = node.id
    return node.arg if identifier is None else identifier
fecee0dfa53bbb4e1d520e13e5e2363e9035454b
705,023
import ast


def parse_code_str(code_str) -> ast.AST:
    """Parse a (possibly incomplete) code string from a computation.

    A trailing ':' gets a 'pass' body appended; an IndentationError is
    retried on the stripped source.
    """
    source = code_str + "pass" if code_str.endswith(":") else code_str
    try:
        return ast.parse(source)
    except IndentationError:
        return ast.parse(source.strip())
ed0c2101dd38ca5e2fc390db3ba94b7fe13ff44d
705,025
def fibonacci_list(n):
    """Compute fibonacci(n), caching intermediate results in a list.

    :param n: index (n > 0 expected; negative n falls back to 0)
    :return: the n-th Fibonacci number
    """
    cache = [0, 1, 1]
    while len(cache) <= n:
        cache.append(cache[-1] + cache[-2])
    return cache[0] if n < 0 else cache[n]
02890bd5877c49d5e4d7f053a64dd9cfdeaa7d7d
705,026
def merge(line):
    """Merge a single row or column toward index 0, 2048-style.

    Non-zero tiles slide together, then equal adjacent pairs combine once
    (no cascading), and the result is padded with zeros to the original
    length.  Replaces the original triple-buffer implementation with the
    standard slide-then-pair algorithm; behavior is unchanged.

    :param line: list of tile values (0 = empty)
    :return: new merged list, same length as `line`
    """
    size = len(line)
    slid = [tile for tile in line if tile != 0]  # slide out the gaps
    merged = []
    i = 0
    while i < len(slid):
        if i + 1 < len(slid) and slid[i] == slid[i + 1]:
            merged.append(slid[i] * 2)  # combine one pair, consume both
            i += 2
        else:
            merged.append(slid[i])
            i += 1
    return merged + [0] * (size - len(merged))
5992c1bc48af124b069fd31419fec5b6edd5f3ab
705,027
def _perc(a, b):
    """Utility: the ratio a/b as a percentage rounded to two decimal
    places; 'N/A' when b is zero."""
    if b == 0:
        return 'N/A'
    return round(100.0 * a / b, 2)
aa0f4c0fa09dc77b422b3779d0e9e2484b0df348
705,028
def shift_left_bit_length(x: int) -> int:
    """Smallest power of two >= x (1 shifted by the bit length of x - 1).

    :param int x: value whose bit length drives the shift
    :returns: 2 raised to ``(x - 1).bit_length()``
    """
    return 2 ** (x - 1).bit_length()
854e79309125c60c6e5975685078809fb4c016a4
705,029
import argparse


def cli():
    """Parse and return command line arguments."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument(
        'file',
        help='reading list file for reading/writing markdown notes',
        nargs='?',
        default='/home/sam/notes/2020-08-28_5.md',
    )
    return arg_parser.parse_args()
80a6ee8ff618aa9cfaadfddab7daccff3fe7fa1e
705,030
from typing import List


def get_function_contents_by_name(lines: List[str], name: str):
    """
    Extracts a function from `lines` of segmented source code with the name `name`.

    Args:
        lines (`List[str]`):
            Source code of a script seperated by line.
        name (`str`):
            The name of the function to extract. Should be either `training_function` or `main`
    """
    if name not in ("training_function", "main"):
        raise ValueError(f"Incorrect function name passed: {name}, choose either 'main' or 'training_function'")
    collected = []
    capturing = False
    for line in lines:
        if not capturing:
            if f"def {name}" in line:
                capturing = True
                collected.append(line)
            continue
        # Stop at the start of the next top-level section.
        hit_end = (name == "training_function" and "def main" in line) or (
            name == "main" and "if __name__" in line)
        if hit_end:
            return collected
        collected.append(line)
60239b0063e83a71641d85194f72a9cc61221177
705,031
import ctypes
from ctypes.util import find_library
import errno


def create_linux_process_time():
    """
    Uses :mod:`ctypes` to create a :func:`time.process_time` on the
    :samp:`'Linux'` platform.

    :rtype: :obj:`function`
    :return: A :func:`time.process_time` equivalent.
    """
    CLOCK_PROCESS_CPUTIME_ID = 2  # time.h: per-process CPU-time clock id

    # C typedefs mirrored for the clock_gettime(2) signature.
    clockid_t = ctypes.c_int
    time_t = ctypes.c_long

    class timespec(ctypes.Structure):
        # struct timespec from <time.h>
        _fields_ = [
            ('tv_sec', time_t),  # seconds
            ('tv_nsec', ctypes.c_long)  # nanoseconds
        ]

    # clock_gettime historically lives in librt; use_errno=True lets us
    # read the C errno after a failed call.
    _clock_gettime = ctypes.CDLL(
        find_library('rt'), use_errno=True).clock_gettime
    _clock_gettime.argtypes = [clockid_t, ctypes.POINTER(timespec)]

    def process_time():
        # Query per-process CPU time; result is float seconds.
        tp = timespec()
        if _clock_gettime(CLOCK_PROCESS_CPUTIME_ID, ctypes.byref(tp)) < 0:
            err = ctypes.get_errno()
            msg = errno.errorcode[err]
            if err == errno.EINVAL:
                # NOTE(review): message says clk_id (4) but the id used is 2 —
                # likely stale text; confirm before relying on it.
                msg += (
                    "The clk_id (4) specified is not supported on this system")
            raise OSError(err, msg)
        return tp.tv_sec + tp.tv_nsec * 1e-9

    return process_time
d1c479e059ad17c8377db0f6012a7e8ab55b1905
705,032
import json


def get_json(obj, indent=4):
    """Serialise `obj` to a sorted, indented JSON string."""
    return json.dumps(obj, indent=indent, sort_keys=True)
be1376fcb9e820cc5012f694ca830ba0c52b5fef
705,033
def build_operation(id, path, args, command="set", table="block"):
    """
    Data updates sent to the submitTransaction endpoint consist of a
    sequence of "operations".  Helper that constructs one such operation;
    a dotted string `path` is split into its segments.
    """
    segments = path.split(".") if isinstance(path, str) else path
    return {
        "id": id,
        "path": segments,
        "args": args,
        "command": command,
        "table": table,
    }
74656a7568a6d705c9c24c091660b93d16977512
705,034
def compute_min_refills(distance: int, tank: int, stops: list):
    """
    Computes the minimum number of gas station pit stops.

    Greedy strategy: from each position, drive to the farthest stop
    reachable on one tank; refuel only when another hop remains.

    >>> compute_min_refills(950, 400, [200, 375, 550, 750])
    2
    >>> compute_min_refills(10, 3, [1, 2, 5, 9])
    -1

    Example 3:

    >>> compute_min_refills(200, 250, [100, 150])
    0
    """
    previous, current = 0, 0
    # Sentinel positions: start (0) and destination (distance).
    positions = [0] + stops + [distance]
    num_refills, cur_position = 0, 0
    while current <= len(stops):
        previous = current
        # Advance to the farthest position reachable on one full tank.
        while current <= len(stops) and (
            positions[current + 1] - positions[previous]
        ) <= tank:
            current += 1
        cur_position = positions[current]
        # No progress means the next stop is out of range.
        if current == previous:
            return -1  # destination not possible
        # Refuel unless the destination itself was reached.
        if cur_position < distance:
            num_refills += 1
    return num_refills
41dff6085f3b46b191c40c3dde9b68ee3ee41e3e
705,036
def invert_dict(d):
    """Invert dict d[k]=v to be p[v]=[k1,k2,...kn]"""
    inverted = {}
    for key, value in d.items():
        inverted.setdefault(value, []).append(key)
    return inverted
1438ad5879cccf89030cb96dc5ae6c024f8e417c
705,037
def _upper(string):
    """Custom upper string function.

    Examples:
        foo_bar -> FooBar
    """
    titled = string.title()
    return titled.replace("_", "")
04ad1596657736847e909e0c4937afc407ea1f60
705,038
import re


def escape_sql_string(string: str) -> str:
    """
    Backslash-escape single quotes and backslashes, then wrap the result
    in single quotes.
    """
    escaped = re.sub(r"(['\\])", r"\\\1", string)
    return "'" + escaped + "'"
68f91b6a5c5bfcec6298f6b6f5c7dfb6b7a095f5
705,039
def _round_to_base(x, base=5):
    """Round to nearest multiple of `base`."""
    multiples = round(float(x) / base)
    return int(base * multiples)
beccfe2951b9fcc7aafef57fd966418df1ce2cc1
705,040
def performStats(dataArray):
    """Sum each entry's old/new occurrence dicts onto the entry
    (oldSum/newSum) and aggregate totals per year modulo 20.

    :return: 20-element list of [oldSum total, newSum total] buckets
    """
    year_totals = [[0, 0] for _ in range(20)]
    for entry in dataArray:
        entry.oldSum = sum(entry.old.values())
        entry.newSum = sum(entry.new.values())
        bucket = int(entry.year) % 20  # 0-19 index
        year_totals[bucket][0] += entry.oldSum
        year_totals[bucket][1] += entry.newSum
    return year_totals
444c291504783c6cf353c9dad0b4a33c0c4fa172
705,041
from typing import Any


def ispointer(obj: Any) -> bool:
    """Check if a given obj is a pointer (is a remote object).

    Args:
        obj (Any): Object.

    Returns:
        bool: True (if pointer) or False (if not).
    """
    type_name = type(obj).__name__
    return type_name.endswith("Pointer") and hasattr(obj, "id_at_location")
34bdf58b8352a11d878043ee2611d0b7c2a0dae5
705,042
def context(): """context: Overwritten by tests.""" return None
1bd0bc8ca8c9829ffcb7b141b7cf64dfcd87df45
705,043
def svm_predict(model, samples):
    """Predicts the response based on the trained model, flattened."""
    predictions = model.predict(samples)
    return predictions[1].ravel()
a510a64e602bbe14a3aa192cacd11b996704d91e
705,044
def __none_to_zero(string):
    """
    Return '0' if the string is "none" or "null" (case-insensitive);
    return the string itself otherwise.

    @type string: C{string}
    @param string: The string to test for values of "none" or "null".

    @rtype: C{string}
    @return: '0' if the string is "none" or "null", the string itself otherwise.
    """
    lowered = string.lower()
    return '0' if lowered in ("none", "null") else string
45da6720f4e8b6047e161dfe985421c8c7b37a38
705,045
import glob
import os


def include(d, e):
    """Generate a pair of (directory, file-list) for installation.

    'd' -- A directory
    'e' -- A glob pattern
    """
    pattern = '%s/%s' % (d, e)
    files = [path for path in glob.glob(pattern) if os.path.isfile(path)]
    return (d, files)
b1afcf1698a2991001c480cc009bbe1858ce8120
705,046
def ubatch_to_csv(batch):
    """
    Utility function to convert a batch of APIUser data to CSV; nested
    'permissions_dict' keys are flattened into their own columns.
    """
    permkey = 'permissions_dict'
    fields = [key for key in batch[0] if key != permkey]
    fields += list(batch[0][permkey].keys())
    rows = []
    for record in batch:
        cells = [str(record.get(field, record[permkey].get(field, None))) for field in fields]
        rows.append(','.join(cells))
    return '{}\n{}'.format(','.join(fields), '\n'.join(rows))
9950cb8e1f79f2cc37580142a125717e7e534de1
705,047
def guid_to_num(guid):
    """
    Convert a DHT guid to an integer.

    Args:
        guid: The guid to convert, as a string or unicode, in
            hexadecimal (a legacy trailing 'L' is stripped).

    Returns:
        An integer corresponding to the DHT guid given.
    """
    cleaned = guid.rstrip('L')
    return int(cleaned, base=16)
7da3e7a60b6ae3410baab62083714f47a3afc790
705,048
import itertools


def gather_slice_list_items(slices, key):
    """For a list of slices, get the flattened list of all of a certain key."""
    present = (entry[key] for entry in slices if key in entry)
    return list(itertools.chain.from_iterable(present))
068b511aefa124f9881f0d8cdc4d115b15922066
705,049
def contributions(datafile):
    """Parse a text data file into a list of contribution dicts.

    Each line is "info words ... url"; the last whitespace token is the
    URL, the rest is the info string.
    """
    entries = []
    with open(datafile, 'r') as data:
        for raw in data.readlines():
            parts = raw.strip().split(" ")
            entries.append({
                'url': parts[-1].strip(),
                'info': " ".join(parts[:-1]).strip(),
            })
    return entries
37c5743df822be2cefdbe0bad60db35491ea599d
705,050
import random def randomize_demand(demand): """Return a randomized demand when given a static demand""" return random.uniform(0, 2.25) * demand
01eed8f0008e71af117920782a2a42b566055a89
705,051
import argparse


def parse_args(args):
    """Define and parse the go_term_enrichment command-line arguments."""
    parser = argparse.ArgumentParser(description="go_term_enrichment")
    # Required positionals: (name, help)
    required = [
        ("file_names", "Name of folder and filenames for the promoters extracted"),
        ("go_directory", "Directory location of go term enrichment files"),
        ("background_gene_set", "Location of background gene set"),
        ("NCBI_gene_list", "Location of NCBI gene list"),
        ("genes_of_interest", "Location of genes of interest"),
    ]
    for arg_name, help_text in required:
        parser.add_argument(arg_name, type=str, help=help_text)
    # Optional positionals: (name, help, default)
    optional = [
        ("variable1_name", "Optional replacement name for 2nd variable eg. non-specific", "constitutive"),
        ("variable2_name", "Optional replacement name for 2nd variable eg. tissue_specific", "variable"),
        ("author_name", "Optional replacement name for author in reference to the geneset", "Czechowski"),
    ]
    for arg_name, help_text, default in optional:
        parser.add_argument(arg_name, type=str, help=help_text, default=default, nargs="?")
    return parser.parse_args(args)
9501ca0e9e603231751a2e7fe7a1dcf90f753be4
705,052
import subprocess


def clip(text):
    """
    Attempts to copy the specified text to the clipboard, returning a
    boolean indicating success.  Tries pbcopy (macOS) first, then xclip.
    """
    payload = text.encode()
    try:
        proc = subprocess.Popen("pbcopy", stdin=subprocess.PIPE, stdout=subprocess.PIPE)
        proc.communicate(payload)
        return (not proc.returncode)
    except OSError:
        try:
            proc = subprocess.Popen("xclip", stdin=subprocess.PIPE, stdout=subprocess.PIPE)
            proc.communicate(payload)
            return (not proc.returncode)
        except OSError:
            return (False)
7096bc53dfc1d33af0536143ebb7d09c23e29e0f
705,053
def get_item(): """Returns a dict representing an item.""" return { 'name': 'Nikon D3100 14.2 MP', 'category': 'Cameras', 'subcategory': 'Nikon Cameras', 'extended_info': {} }
692c3d83ee1cc04026e71b7ad7357ebd9930f47f
705,054
import torch


def abs_(input):
    """
    In-place version of :func:`treetensor.torch.abs`; thin delegate to
    :func:`torch.abs_` so tree tensors dispatch through torch.

    Examples::

        >>> import treetensor.torch as ttorch
        >>> t = ttorch.tensor([12, 0, -3])
        >>> ttorch.abs_(t)
        >>> t
        tensor([12, 0, 3])
    """
    return torch.abs_(input)
65b32c91cf00a72b94b950d0e65cca71390b8c24
705,055
import textwrap


def dedent(text):
    """Remove any common leading whitespace from every line in a given text."""
    return textwrap.dedent(text)
514f9f41feac1c19ff92d6c9258bf54d7d3d7bd8
705,056
def enumerate_square(i, n):
    """
    Given i in range(n^2 - n), compute a bijective mapping
    range(n^2 - n) -> range(n) x range(n-1), skipping the diagonal.
    """
    row, col = divmod(i, n - 1)
    row = int(row)
    col = int(col)
    if col >= row:
        col += 1  # skip the (row, row) diagonal cell
    return row, col
93d3465c88a7bc9952161524fded4d7250131a65
705,057
import re


def get_playback_time(playback_duration):
    """
    Get the playback time (in seconds) from a duration string,
    e.g. PT0H1M59.89S.  Fields are interpreted positionally from the
    right: seconds, minutes, hours.
    """
    pieces = [chunk for chunk in re.split('[PTHMS]', playback_duration) if chunk != '']
    pieces.reverse()
    multipliers = (1, 60, 60 * 60)
    total = 0
    for position, chunk in enumerate(pieces):
        if position < 3:
            total += float(chunk) * multipliers[position]
    return total
6a68c68ce465610b57626a725ac9c8889b527fdb
705,058
def _binary_array_to_hex(arr):
    """
    Internal: pack a flattened binary array into a hex string,
    8 bits per byte with the lowest flattened index as the least
    significant bit of each byte.
    """
    hex_parts = []
    byte_val = 0
    for index, bit in enumerate(arr.flatten()):
        if bit:
            byte_val += 2 ** (index % 8)
        if index % 8 == 7:  # byte complete
            hex_parts.append(hex(byte_val)[2:].rjust(2, '0'))
            byte_val = 0
    return "".join(hex_parts)
b705e4dc1dfc48f92f7c97dd7ba9d4dd4c4d0a98
705,059
def float_nsf(num, precision=17):
    """Format `num` in scientific notation with `precision` significant figures."""
    template = '{0:.%ie}' % (precision - 1)
    return template.format(float(num))
c2390b69364455adc6220e1e4aad81d7081bd5e4
705,060
def from_literal(tup):
    """Convert from simple literal form to the more uniform typestruct.

    Literal grammar (inferred from the branches below):
      * a non-tuple scalar           -> ('prim', value)
      * ('prim', ...)                -> passed through unchanged
      * ('tuple', e1, e2, ...)       -> ('tuple', (conv(e1), conv(e2), ...))
      * ('map', k, v)                -> ('map', (union(k), union(v)))
      * (tag, spec)                  -> (tag, union(spec)) for any other tag
      * a tuple not starting with str -> element-wise conversion
    where union(x) wraps a non-tuple in a 1-tuple and returns a frozenset
    of converted alternatives.
    """
    def expand(vals):
        # Convert each literal element recursively.
        return [from_literal(x) for x in vals]

    def union(vals):
        # Normalise a single alternative into a 1-tuple, then build the
        # set of converted alternatives.
        if not isinstance(vals, tuple):
            vals = (vals,)
        v = expand(vals)
        return frozenset(v)

    if not isinstance(tup, tuple):
        # Bare scalar literal -> primitive type.
        return ('prim', tup)
    elif isinstance(tup[0], str):
        tag, *vals = tup
        if tag == 'prim':
            return tup
        elif tag == 'tuple':
            params = tuple(expand(vals))
            return (tag, params)
        elif tag == 'map':
            k, v = vals
            return (tag, (union(k), union(v)))
        else:
            # Generic single-parameter tag (e.g. list-like).
            vals, = vals  # pylint: disable=self-assigning-variable
            return (tag, union(vals))
    else:
        # Plain tuple of literals: convert element-wise.
        return tuple(expand(tup))
a06d35e27512bfeae030494ca6cad7ebac5c7d2c
705,061
def print_result(error, real_word):
    """Print the win/lose summary; the word is revealed after 5 errors.

    Always returns 0.
    """
    lost = (error == 5)
    if lost:
        print("You lost!")
        print("Real word is:", real_word)
    else:
        print("You won!")
    return 0
598814ac64ac767c102080a0a82541d3b888843c
705,062
def mongo_convert(sch):
    """Converts a schema dictionary into a mongo-usable form.

    'type' becomes 'bsonType', a two-element list becomes
    minimum/maximum bounds, and nested dicts are converted recursively.
    """
    converted = {}
    for key, value in sch.items():
        if key == 'type':
            converted["bsonType"] = value
        elif isinstance(value, list):
            converted["minimum"] = value[0]
            converted["maximum"] = value[1]
        elif isinstance(value, dict):
            converted[key] = mongo_convert(value)
    return converted
0208ceda058042a9f44249a1b724c4b7883afec1
705,063
import math


def discounted_cumulative_gain(rank_list):
    """Running DCG over the rank list: each element adds
    rel[i] / log2(i + 1) to the previous total (first element
    undiscounted).  Returns the list of partial sums."""
    running = [rank_list[0]]
    for position in range(1, len(rank_list)):
        gain = rank_list[position] / math.log2(position + 1)
        running.append(running[-1] + gain)
    return running
eaa5ad6185e2abb239097be5399dffd82d143fd3
705,064
import sys
import os


def ancienne_fonction_chemin_absolu(relative_path):
    """
    Give the absolute path of a file.

    PRE : -
    POST : Returns 'C:\\Users\\sacre\\PycharmProjects\\ProjetProgra\\' +
    'relative_path' (per the original French docstring — machine-specific).
    """
    # Under PyInstaller, sys._MEIPASS points at the unpacked bundle dir;
    # otherwise fall back to this module's own directory.
    base_path = getattr(sys, '_MEIPASS', os.path.dirname(os.path.abspath(__file__)))
    print("b : ", base_path)
    # Raises ValueError if "ProjetProgra" is not in the path.
    correct = base_path.index("ProjetProgra")
    print("lll : ", base_path[:correct + 13])
    # NOTE(review): slices a fixed 15 characters off the end instead of
    # using `correct`; looks machine-specific — confirm before reuse.
    return os.path.join(base_path[:-15], relative_path)
a33db91a2bd72273acc14caea415181297c16318
705,065
def get_form_field_names(form_class):
    """Return the list of field names of a WTForm.

    :param form_class: A `Form` subclass
    """
    unbound = form_class._unbound_fields
    if unbound:
        return [entry[0] for entry in unbound]
    # Mirror of FormMeta.__call__: scan public attributes for unbound fields.
    names = []
    for attr in dir(form_class):
        if attr.startswith('_'):
            continue
        if hasattr(getattr(form_class, attr), '_formfield'):
            names.append(attr)
    return names
27c91a1e3c1b71f69d44747955d59cee525aa50e
705,066
def format_dnb_company_investigation(data):
    """
    Format DNB company investigation payload to something
    DNBCompanyInvestigationSerlizer can parse: moves telephone_number
    into a nested dnb_investigation_data dict.  Mutates and returns `data`.
    """
    phone = data.pop('telephone_number', None)
    data['dnb_investigation_data'] = {'telephone_number': phone}
    return data
9c27990bad98b36649b42c20796caabeaae1e21b
705,067
def calculate_relative_enrichments(results, total_pathways_by_resource):
    """Calculate relative enrichment of pathways (enriched pathways/total pathways).

    :param dict results: result enrichment
    :param dict total_pathways_by_resource: resource to number of pathways
    :rtype: dict
    """
    ratios = {}
    for resource, enriched in results.items():
        ratios[resource] = len(enriched) / total_pathways_by_resource[resource]
    return ratios
7060e032f2a619929cfcf123cf0946d7965b86de
705,069
def clean_name(name):
    """Normalise '"Last, First"' or '"First Last"' into '"First Last"',
    trimmed and capitalised."""
    comma_parts = name.split(',')
    if len(comma_parts) == 2:
        # "last, first" format — flip it
        last, first = comma_parts[0], comma_parts[1]
    else:
        first, last = name.split(' ')[:2]
    return f'{first.strip().capitalize()} {last.strip().capitalize()}'
ef5fe3e53ba1134c45c30f4b6342a0641e85f114
705,070
def diff_align(dfs, groupers):
    """
    Align groupers to newly-diffed dataframes

    For groupby aggregations we keep historical values of the grouper
    along with historical values of the dataframes.  The dataframes are
    kept in historical sync with the ``diff_loc`` and ``diff_iloc``
    functions above.  This function copies that functionality over to
    the secondary list of groupers.

    :param dfs: sequence of diffed dataframes (at most as many as groupers)
    :param groupers: deque of grouper sequences to trim into alignment
    :return: (old, groupers) — trimmed-off history and the aligned deque
    """
    old = []
    # Drop whole groupers from the front until counts match.
    while len(dfs) < len(groupers):
        old.append(groupers.popleft())
    if dfs:
        # Trim the front of the oldest grouper so row counts line up.
        n = len(groupers[0]) - len(dfs[0])
        if n:
            old.append(groupers[0][:n])
            groupers[0] = groupers[0][n:]
    # Post-conditions: element counts and per-element lengths agree.
    assert len(dfs) == len(groupers)
    for df, g in zip(dfs, groupers):
        assert len(df) == len(g)
    return old, groupers
2a92476cd913404b737dc941d51083f64ef70978
705,071
def load_targets_file(input_file):
    """
    Read the named file and return a list of its lines with newline and
    carriage-return characters removed.

    Precondition: input_file should exist in the file system.
    """
    with open(input_file, 'r') as handle:
        raw_lines = handle.readlines()
    return [line.replace('\n', '').replace('\r', '') for line in raw_lines]
40d305e244264d6c3249bb9fb914cda3ebcda711
705,072
def pega_salada_sobremesa_suco(items):
    """Populate the salada / suco / sobremesa menu entries from the raw
    item list (helper for the meal cardapio).

    Each entry is found by its uppercase "TAG:" prefix, stripped of the
    tag and capitalised; matched items are removed from the list.

    :return: (cardapio dict, remaining items)
    """
    cardapio = {}
    for dish in ("salada", "suco", "sobremesa"):
        tag = dish.upper() + ":"  # e.g. "SOBREMESA:"
        matches = [s.replace(tag, "") for s in items if tag in s]
        cardapio[dish] = matches[0].capitalize()
        items = [s for s in items if tag not in s]
    return cardapio, items
4ccf2907a4e828d1357e16e827ad587e4a50a287
705,073
def show_hidden_word(secret_word, old_letters_guessed):
    """
    :param secret_word: the word to mask
    :param old_letters_guessed: letters already guessed
    :return: the word with unguessed letters shown as ' _ '
    """
    rendered = []
    for ch in secret_word:
        rendered.append(ch if ch in old_letters_guessed else ' _ ')
    return "".join(rendered)
2b3618619dcde2875da9dc8600be334e7aaadaad
705,074
def filter_packages(packages: list, key: str) -> list:
    """Filter out packages based on the given category."""
    matching = []
    for pkg in packages:
        if pkg["category"] == key:
            matching.append(pkg)
    return matching
46f11f5a8269eceb9665ae99bdddfef8c62295a2
705,075
import os


def create_splits(dataframe, split_path, n_splits=10):
    """Write `dataframe` to `n_splits` gzip CSV files named
    ``<split_path>split-<i>.csv.gz``; the last split absorbs any
    remainder rows.  Aborts early (returning 0) when a target file
    already exists.

    NOTE(review): drops a hard-coded 'Unnamed: 0' column, which raises
    KeyError if the frame lacks it — confirm callers always pass a
    CSV-round-tripped frame.  Should i reset index ?
    """
    # Base chunk size (integer division); the final chunk takes the rest.
    length = int(dataframe.shape[0] / int(n_splits))
    for i in range(n_splits):
        frame = dataframe.iloc[i * length:(i + 1) * length]
        if i == n_splits - 1:
            # Last split: all remaining rows.
            frame = dataframe.iloc[i * length:]
        name = split_path + f'split-{i}.csv.gz'
        if os.path.exists(name):
            print(f'File {name} already exits! Exiting...')
            return 0
        print(frame.columns)
        frame = frame.drop(['Unnamed: 0'], axis=1)
        frame.to_csv(name, index=False, compression='gzip')
fd6c8e31fe271a957028ff7471a1294a84ee62be
705,078
import os


def get_data_filepath(filename):
    """Construct filepath for a file in the test/data directory

    Args:
        filename: name of file

    Returns:
        full path to file
    """
    here = os.path.dirname(__file__)
    return os.path.join(here, 'data', filename)
d3d83cbf83d32b0252658f77b7bbb6fbdb99845f
705,079
def _column_number_to_letters(number):
    """
    Converts given column number into column letters (spreadsheet style).

    Repeatedly right-shifts by 26; remainders are 1-based and map to
    ASCII by adding 64.

    Parameters
    ----------
    number : int
        Column number to convert to column letters (1..18278).

    Returns
    -------
    unicode
        Column letters.

    References
    ----------
    :cite:`OpenpyxlDevelopers2019`

    Examples
    --------
    # Doctests skip for Python 2.x compatibility.
    >>> _column_number_to_letters(128)  # doctest: +SKIP
    'DX'
    """
    assert 1 <= number <= 18278, (
        'Column number {0} must be in range [1, 18278]!'.format(number))
    parts = []
    while number > 0:
        number, remainder = divmod(number, 26)
        if remainder == 0:
            remainder = 26
            number -= 1
        parts.append(chr(remainder + 64))
    parts.reverse()
    return ''.join(parts)
c9a68bcd32c8f254af322bc61e447cfae61cb6d2
705,080
def _chunk_member_lag(chunk, repl_member_list, primary_optimedates, test_run_indices):
    """Helper to compute per-secondary lag from values in an FTDC chunk.

    Lag is primary optimeDate minus the member's optimeDate at each
    sampled index.  Iteration stops at the first member whose optimeDate
    key is missing from the chunk.

    :param collection.OrderedDict chunk: FTDC JSON chunk
    :param list[str] repl_member_list: list of all members in the replSet
    :param list[int] primary_optimedates: optimeDate values for the primary
    :param test_run_indices: indices to sample
    :rtype: dict (lag values for the secondaries)
    """
    lags_by_member = {}
    for member in repl_member_list:
        key = ('replSetGetStatus', 'members', member, 'optimeDate')
        if key not in chunk:
            break
        member_values = chunk[key]
        lags_by_member[member] = [
            primary_optimedates[idx] - member_values[idx]
            for idx in test_run_indices
        ]
    return lags_by_member
115ba53505d5bcbb9e0c1cdf0eab675fae73e568
705,081
def smallest_evenly_divisible(min_divisor, max_divisor, minimum_dividend=0):
    """Returns the smallest number >= `minimum_dividend` that is evenly
    divisible (no remainder) by every factor in
    ``range(max_divisor, 0, -min_divisor)``.
    """
    factors = range(max_divisor, 0, -min_divisor)
    candidate = minimum_dividend
    while True:
        if all(candidate % factor == 0 for factor in factors):
            return candidate
        candidate += 1
fa23d9a413a0909bfc05d7eb928aec8ade4cb06f
705,082
def factorial(n):
    """
    Returns the factorial of n.

    Parameters
    ----------
    n : int
        denotes the non-negative integer for which factorial value is needed

    Raises
    ------
    NotImplementedError
        for negative n (exception type kept for backward compatibility).

    Rewritten iteratively: the recursive version hit the recursion limit
    for large n; results are unchanged.
    """
    if n < 0:
        raise NotImplementedError(
            "Enter a valid non-negative integer"
        )
    result = 1
    for k in range(2, n + 1):
        result *= k
    return result
fe0b7100e1292d1e96daf18545d9fdfb931f9f74
705,083
def Divide(a, b):
    """Returns the quotient, or NaN if the divisor is zero."""
    return float('nan') if b == 0 else a / float(b)
3ed0b07949bb802177e52bf8d04e9dfde92ab2de
705,084
import os


def craft_item():
    """Get craft item from the CRAFT_ITEM environment variable (None when unset)."""
    return os.getenv('CRAFT_ITEM')
2f6f940ad83023dc21f68c2212c98c0e13d8d0e4
705,085
import ntpath


def path_leaf(path):
    """
    Extract the file name from a path.  If the path ends with a slash the
    tail is empty, so the basename of the head is used instead.

    Parameters
    ----------
    path : str
        Path of the file

    Returns
    -------
    output : str
        The name of the file
    """
    head, tail = ntpath.split(path)
    if tail:
        return tail
    return ntpath.basename(head)
58930f081c2366b9084bb279d1b8b267e5f93c96
705,086
def focus_metric(data, merit_function='vollath_F4', **kwargs):
    """Compute the focus metric.

    Computes a focus metric on the given data using a supplied merit
    function, passed either as the name of a function defined in this
    module or as a callable object.  Extra keyword arguments are
    forwarded to the merit function.

    Args:
        data (numpy array) -- 2D array to calculate the focus metric for.
        merit_function (str/callable) -- Name of merit function (if in
            panoptes.utils.images) or a callable object.

    Returns:
        scalar: result of calling merit function on data
    """
    if isinstance(merit_function, str):
        if merit_function not in globals():
            raise KeyError(
                "Focus merit function '{}' not found in panoptes.utils.images!".format(merit_function))
        merit_function = globals()[merit_function]
    return merit_function(data, **kwargs)
c8f571e11202d39d8f331fca5fc93333aeb71e62
705,087
def get_canonical_import(import_set):
    """Obtain one single import from a set of possible sources of a symbol.

    One symbol might come from multiple places as it is being imported and
    reexported.  To keep API reports stable we always pick the same
    import: lexicographically first, with names containing 'lite'
    deprioritised so core tensorflow wins.

    Args:
        import_set: (set) Imports providing the same symbol

    Returns:
        A module name to import
    """
    ordered = sorted(import_set, key=lambda name: ('lite' in name, name))
    return ordered[0]
ae53ca4d271ab543a7a13f1ce8240ce6eb328bbb
705,088
import torch
from typing import Iterable


def _tensor_in(tensor: torch.Tensor, iterable: Iterable[torch.Tensor]):
    """Returns whether `tensor is element` for any element in `iterable`.

    Needed because ``tensor in iterable`` is unreliable for `Tensor`s
    (`==` is elementwise).  See
    https://discuss.pytorch.org/t/how-to-judge-a-tensor-is-in-a-list/15998/4
    """
    for candidate in iterable:
        if candidate is tensor:
            return True
    return False
84ac8a129440c9c8d7785029b04bd403514a3bb9
705,089
def is_development_mode(registry):
    """
    Returns true, if mode is set to development in current ini file.

    :param registry: request.registry
    :return: Boolean
    """
    settings = registry.settings
    if 'mode' not in settings:
        return False
    return settings['mode'].lower() == 'development'
af1b11fa69231a455406247b593f8ff49855bc3f
705,090
def float_fraction(trainpct):
    """Parse *trainpct* as a float bounded between 0.0 and 1.0.

    Args:
        trainpct: value convertible to float (e.g. a command-line string).

    Returns:
        float: the parsed fraction.

    Raises:
        ValueError: if the value cannot be parsed as a float, or lies
            outside [0.0, 1.0].
    """
    try:
        fraction = float(trainpct)
    except (TypeError, ValueError):
        # ValueError instead of bare Exception: it is a subclass, so old
        # `except Exception` callers still work, and argparse `type=`
        # converts it into a proper usage error. TypeError is caught too
        # so e.g. float(None) produces the same message.
        raise ValueError("Fraction must be a float") from None
    if not 0.0 <= fraction <= 1.0:
        raise ValueError("Argument should be a fraction! Must be <= 1.0 and >= 0.0")
    return fraction
8eb28dcaa0ed9250f4aa68d668ad424b5b5eded5
705,091
def handle_internal(msg):
    """Process an internal message.

    Resolves the message sub-type to an Internal constant, looks up the
    matching handler on the gateway, and invokes it with the message.
    Returns None when no handler is registered for this sub-type.
    """
    internal = msg.gateway.const.Internal(msg.sub_type)
    handler = internal.get_handler(msg.gateway.handlers)
    if handler is not None:
        return handler(msg)
    return None
0f5cae49cf5d36a5e161f88902c46af931fd622a
705,092
import codecs import json def _get_input_json(input_path): """ A really basic helper function to dump the JSON data. This is probably a leftover from when I was iterating on different reduce() functions. """ # Read in the input file. input_file = codecs.open(input_path, encoding="utf-8", mode="r") json_data = json.load(input_file) input_file.close() return json_data
5c91e77b2224435dbf17fcfc2351c574c173c6aa
705,093
import math


def conv_float2negexp(val):
    """Returns the least restrictive negative exponent of the power 10
    that would achieve the floating point convergence criterium *val*.
    """
    # math.log(val, 10) is kept (rather than log10) so the floating-point
    # result — and hence the floor — matches the original exactly.
    exponent = math.floor(math.log(val, 10))
    return -int(exponent)
562ccf7d34f8034a25cabfb471e7fc2ab9c0feb6
705,096
import random


def img_get_random_patch(img, w, h):
    """Return a uniformly random patch of size (h, w) cropped from *img*.

    Args:
        img: image array indexed as ``img[y, x, ...]``; must be at least
            h pixels tall and w pixels wide.
        w: patch width in pixels.
        h: patch height in pixels.

    Returns:
        A slice of *img* covering the selected patch.

    Raises:
        ValueError: if the requested patch does not fit inside the image.
    """
    # Largest valid top-left corner for the requested patch size.
    max_x = img.shape[1] - w
    max_y = img.shape[0] - h

    # Raise explicitly instead of `assert`: assertions are stripped when
    # Python runs with -O, which would silently allow out-of-range crops.
    if max_x < 0:
        raise ValueError('Trying to get a patch wider that the image width')
    if max_y < 0:
        raise ValueError('Trying to get a patch higher that the image height')

    # Random, in-bounds top-left corner (randint is inclusive at both ends).
    x = random.randint(0, max_x)
    y = random.randint(0, max_y)

    return img[y:y + h, x:x + w, ...]
41ce199eb5ab8eb136f740eb2e1b495226510690
705,098
import os


def expand_path(filename: str) -> str:
    """
    Expands variables (user and environment) in a file name.

    :param filename: File name, possibly containing variables.
    :return: File name with variables expanded.
    """
    # Expand "~" first, then "$VAR"-style environment references — the
    # same composition order as os.path.expandvars(os.path.expanduser(f)).
    home_expanded = os.path.expanduser(filename)
    return os.path.expandvars(home_expanded)
b6dae3491edbaa00a5f73959b2227ad2fe6f506e
705,099
def prepare_wiki_content(content, indented=True):
    """Set wiki page content.

    When *indented* is true, every line gets a trailing newline and the
    lines are joined with a single space (so each line after the first is
    indented by one space); otherwise *content* is returned unchanged.
    """
    if not indented:
        return content
    return " ".join("{}\n".format(line) for line in content.split("\n"))
14daea5cdb509b333c2aead6dcb453a82e73ce8d
705,100
def default_browser():
    """Return the name of the default Browser for this system."""
    browser_name = 'firefox'
    return browser_name
a5df3959983bcc11fb59b0aea44a0e6ed42cc579
705,101
def splinter_remote_url(request):
    """Remote webdriver url.

    :return: URL of remote webdriver, read from the pytest command-line
        options attached to *request*.
    """
    options = request.config.option
    return options.splinter_remote_url
17bf9bf3ebd7296a2305fe9edeb7168fbca7db10
705,102
import re


def split_list_item_by_taking_words_in_parentheses(item):
    """Split a list entry into its common name and the name in parentheses.

    For ``"Human, Finnish (Homo sapiens)"`` this returns
    ``("Finnish Human", "Homo sapiens")``; for ``"Human (Homo sapiens)"``
    it returns ``("Human", "Homo sapiens")``.
    """
    # Text before the first '(' holds the common/population name.
    before_parens = item.split('(')[0]
    parts = before_parens.split(',')
    if len(parts) > 1:
        # "Name, Qualifier " -> "Qualifier Name": drop the space after the
        # comma; the trailing space before '(' acts as the separator.
        name = parts[1][1:] + parts[0]
    else:
        # Single name: drop the trailing space left before the '('.
        name = parts[0][:-1]
    # First parenthesized group, without the parentheses themselves.
    groups = [match.strip('()') for match in re.findall(r'\([^()]*\)', item)]
    return name, groups[0]
2d8543611007e799d089c77b79ae7263cba36a30
705,103
def pad(value, digits, to_right=False):
    """Only use for positive binary numbers given as strings.

    Pads with zeros to the left by default, or to the right using the
    to_right flag.

    Inputs:
        value -- string of bits
        digits -- number of bits in representation
        to_right -- Boolean, direction of padding

    Output: string of bits of length 'digits'

    Raises exception if value is larger than digits in length.

    Example:
    pad('0010', 6) -> '000010'
    pad('0010', 6, True) -> '001000'
    """
    assert len(value) <= digits
    # str.ljust / str.rjust produce exactly "value + zeros" / "zeros + value".
    if to_right:
        return value.ljust(digits, "0")
    return value.rjust(digits, "0")
98476653ccafeba0a9d81b9193de0687dbf9d85c
705,104
import tempfile
import os


def setup_directories(
    create_report_directory=True,
    create_publish_directory=False,
    temporary_work_directory=None,
):
    """
    Setup a temporary directory, a report directory under it (created if
    necessary), and the publish directory (not created by default if
    necessary).
    """
    if not temporary_work_directory:
        temporary_work_directory = tempfile.TemporaryDirectory()
    work_root = temporary_work_directory.name

    report_directory = os.path.join(work_root, "report")
    publish_directory = os.path.join(work_root, "publish")

    # Each subdirectory is created only when its flag is set.
    if create_report_directory:
        os.makedirs(report_directory)
    if create_publish_directory:
        os.makedirs(publish_directory)

    return temporary_work_directory, report_directory, publish_directory
c54db5354523653d87fc8baee0739b401fa5351b
705,105
def check_band_below_faint_limits(bands, mags):
    """
    Check if a star's magnitude for a certain band is below the
    faint limit for that band.

    Parameters
    ----------
    bands : str or list
        Band(s) to check (e.g. ['SDSSgMag', 'SDSSiMag'].
    mags : int, float or list
        Magnitude(s) of the band(s) corresponding to the band(s) in the
        bands variable

    Returns
    -------
    list : a new list of bands that are above the faint limit (ie - use-able
        bands)
    """
    # Faint limit per band substring; a magnitude at or beyond the limit
    # means the band is too faint to use.
    faint_limits = {
        'SDSSgMag': 24,
        'SDSSrMag': 24,
        'SDSSiMag': 23,
        'SDSSzMag': 22,
    }

    if isinstance(bands, str):
        bands = [bands]
    # Accept a bare int as well as a bare float: the original only wrapped
    # floats, so an integer magnitude raised TypeError in the zip below.
    if isinstance(mags, (int, float)):
        mags = [mags]

    new_bands = []
    for band, mag in zip(bands, mags):
        # Substring match mirrors the original `'SDSSgMag' in band` tests.
        limit = next((lim for key, lim in faint_limits.items() if key in band), None)
        if limit is not None and mag >= limit:
            continue
        new_bands.append(band)
    return new_bands
9e26fcef5bf79b4480e93a5fe9acd7416337cf09
705,106
def find_place_num(n, m):
    """Count monotone lattice paths, i.e. the value at cell (n, m) of the
    recurrence f(n, m) = f(n-1, m) + f(n, m-1) with f(1, m) = f(n, 1) = 1.

    This equals the binomial coefficient C(n + m - 2, n - 1). The original
    double recursion took exponential time; the multiplicative closed form
    below returns identical values in O(n) multiplications.

    Args:
        n, m: positive 1-based grid coordinates.

    Returns:
        int: the number of paths.
    """
    # Multiplicative evaluation of C(n + m - 2, n - 1). The division is
    # exact at every step because the running product of k consecutive
    # factors is always divisible by k!.
    result = 1
    for k in range(1, n):
        result = result * (m - 1 + k) // k
    return result
632e06db2eb2e2eebdb1c5b34bea36124843a960
705,107
def add_license_creation_fields(license_mapping):
    """
    Return an updated ``license_mapping`` of license data adding license
    status fields needed for license creation.

    The mapping is updated in place and also returned.
    """
    creation_defaults = {
        "is_active": False,
        "reviewed": False,
        "license_status": "NotReviewed",
    }
    license_mapping.update(creation_defaults)
    return license_mapping
3856c434a672150c09af4b5e4c7fd9fa55014d5c
705,108
import sys


def InputChecking(str_inputFileName_genotype, str_inputFileName_phenotype):
    """ To check the numbers of sample are consistent in genotype and phenotype data.

    Args:
        str_inputFileName_genotype (str): File name of input genotype data
        str_inputFileName_phenotype (str): File name of input phenotype data

    Returns:
        (tuple): tuple containing:

            - int_num_genotype (int): The line count of the genotype data
            - int_num_phenotype (int): The line count of the phenotype data
              (one line per sample)
    """
    ### check file name exist
    if str_inputFileName_genotype is None:
        sys.exit("There is no input genotype file.")
    if str_inputFileName_phenotype is None:
        sys.exit("There is no input phenotype file.")

    ### count lines of input files. `with` blocks close the handles; the
    ### original `sum(1 for line in open(...))` pattern leaked them.
    with open(str_inputFileName_genotype) as file_genotype:
        int_num_genotype = sum(1 for _ in file_genotype)
    with open(str_inputFileName_phenotype) as file_phenotype:
        int_num_phenotype = sum(1 for _ in file_phenotype)

    ### count sample num. in genotype file: each sample occupies 3 columns
    ### after the 5 leading metadata columns.
    with open(str_inputFileName_genotype, 'r') as file_inputFile:
        list_line = file_inputFile.readline().strip().split(" ")
    # True division is intentional: a column count that is not a multiple
    # of 3 yields a fractional sample count that can never equal the
    # integer phenotype count, so malformed files are rejected.
    int_num_genotype_sample = (len(list_line) - 5) / 3

    if int_num_genotype_sample != int_num_phenotype:
        sys.exit("The number of samples in genotype file does not match the number of samples in phenotype file.")

    return int_num_genotype, int_num_phenotype
c1cf089a2018d2ab99f35e374f55626458698764
705,109
def lowercase(obj):
    """ Make dictionary lowercase

    Recursively lowercases strings, dict keys/values, and the elements of
    lists, sets and tuples; any other object is returned unchanged.
    """
    if isinstance(obj, str):
        return obj.lower()
    if isinstance(obj, dict):
        return {key.lower(): lowercase(val) for key, val in obj.items()}
    if isinstance(obj, (list, set, tuple)):
        container_type = type(obj)
        return container_type(lowercase(item) for item in obj)
    return obj
08b0addd87ef7ba5c016ebee50790e8d5e31042b
705,110
import argparse
import os


def getOptions():
    """Parse and return the command-line arguments.

    The tool removes samples from the design file belonging to the
    user-specified group(s).

    Returns:
        argparse.Namespace: parsed arguments with input/design/out turned
        into absolute paths and ``drops`` split into a list of group names.
    """
    # Note: the original help/description strings were assembled with
    # implicit concatenation missing the joining spaces (e.g.
    # "filebelonging", "uniquedentifiers") and had typos ("Standar",
    # "especific"); they are fixed here.
    parser = argparse.ArgumentParser(description="Removes samples from the design file "
                                     "belonging to the user-specified group(s).")
    # Standard Input
    standard = parser.add_argument_group(title="Standard input", description=
                                         "Standard input for SECIM tools.")
    standard.add_argument("-i", "--input", dest="input", action='store',
                          required=True, help="Input dataset in wide format.")
    standard.add_argument("-d", "--design", dest="design", action='store',
                          required=True, help="Design file.")
    standard.add_argument("-id", "--uniqID", dest="uniqID", action="store",
                          required=True, help="Name of the column with "
                          "unique identifiers.")
    standard.add_argument("-g", "--group", dest="group", action='store',
                          required=False, help="Name of column in design file "
                          "with Group/treatment information.")
    # Tool Specific
    tool = parser.add_argument_group(title="Tool specific input", description=
                                     "Input that is specific for this tool.")
    tool.add_argument("-dp", "--drops", dest="drops", action='store',
                      required=True, help="Name of the groups in your "
                      "group/treatment column that you want to remove from the design file.")
    # Output Paths
    output = parser.add_argument_group(title='Output paths', description=
                                       "Paths for the output files")
    output.add_argument("-o", "--out", dest="out", action="store",
                        required=True, help="Output path for the new design file")
    args = parser.parse_args()

    # Standardize paths
    args.out = os.path.abspath(args.out)
    args.input = os.path.abspath(args.input)
    args.design = os.path.abspath(args.design)

    # Split groups/samples to drop
    args.drops = args.drops.split(",")

    return (args)
e86fda9acc65f90f968a3be2b2238370910b866b
705,111
def dequote(s):
    """
    from: http://stackoverflow.com/questions/3085382/python-how-can-i-strip-first-and-last-double-quotes

    If a string has single or double quotes around it, remove them.
    Make sure the pair of quotes match.
    If a matching pair of quotes is not found, return the string unchanged.
    """
    # Require at least two characters to hold a matching pair. This guards
    # the empty string (the original raised IndexError on "") and a lone
    # quote character (which the original stripped to "" even though there
    # is no pair).
    if len(s) >= 2 and s[0] == s[-1] and s.startswith(("'", '"')):
        return s[1:-1]
    return s
41c5e5fed901d70472dd6eef1ada7d53d395002c
705,113
def url_form(url):
    """Takes the SLWA photo url and returns the photo url.

    Note this function is heavily influenced by the format of the catalogue
    and could be easily broken if the Library switches to a different url
    structure.
    """
    # Already has a recognised image extension: return unchanged.
    if url.endswith(('.png', '.jpg')):
        return url
    return url + '.jpg'
7469850ffb6877ca116a28251d204024e15bc407
705,114
import os def _filepaths(directory, full_paths=True): """Get the filenames in the directory. Args: directory: Directory with the files full_paths: Give full paths if True Returns: result: List of filenames """ # Initialize key variables if bool(full_paths) is True: result = [ os.path.join(directory, filename) for filename in os.listdir( directory) if os.path.isfile( os.path.join(directory, filename))] else: result = [filename for filename in os.listdir( directory) if os.path.isfile(os.path.join(directory, filename))] return result
1a40cb2f3f940a911690f862fd8711d84d90fc94
705,115
def Rpivot(p, q, Mb): """ Given an augmented matrix Mb, Mb = M|b, this gives the output of the pivot entry [i, j] in or below row p, and in or to the right of column q. """ # n is the number of columns of M, which is one less than that of Mb. m = len(Mb) n = len(Mb[0]) - 1 # Initialize i, j to p, q, and we will not go above or leftwards of p, q. i = p j = q # Iterate through the columns of Mb to find its first nonzero column. for y in range(q, n): if [Mb[x][y] for x in range(p, m)] == [0] * (m - p): j = j + 1 else: break # Iterate through the rows of M from p to n-1. for x in range(p, n): # Adds one to row index i if column i is all zeros from column j # to column n. if Mb[x][j:n] == [0] * (n - j + 1): i = i + 1 else: break return [i, j]
155be98d8560bf42cea928e8b1da6e14e3e7762d
705,117
def compute_down(expr):
    """ Compute the expression on the entire inputs

    inputs match up to leaves of the expression

    Base implementation: the expression is returned unchanged.
    """
    result = expr
    return result
71677a16093d82a28c1d153c9385b33c01b4dd24
705,118
from typing import Tuple


def requests_per_process(process_count: int, conf) -> Tuple[int, int]:
    """Divides how many requests each forked process will make.

    Returns the per-process (concurrency, requests) shares, truncated
    toward zero exactly as ``int(x / n)`` does.
    """
    concurrency_share = int(conf.concurrency / process_count)
    request_share = int(conf.requests / process_count)
    return concurrency_share, request_share
00af7a63471201c3fffcfb610f74a745ca326b68
705,120