Dataset columns — content: string (length 35 to 416k), sha1: string (length 40), id: int64 (0 to 710k).
import numpy

def normal_transform(matrix):
    """Compute the 3x3 matrix which transforms normals given an affine vector transform."""
    # Inverse transpose of the upper-left 3x3 block; numpy.linalg.inv performs
    # the matrix inversion (operator.inv is bitwise NOT and would be wrong here).
    return numpy.linalg.inv(numpy.transpose(matrix[:3, :3]))
b7f7256b9057b9a77b074080e698ff859ccbefb2
706,678
def username(request):
    """Returns ESA FTP username"""
    return request.config.getoption("--username")
2393884c2c9f65055cd7a14c1b732fccf70a6e28
706,679
import re

def is_valid_mac_address_normalized(mac):
    """Validates that the given MAC address has what we call a normalized format.

    We've accepted the HEX only format (lowercase, no separators) to be generic.
    """
    return re.compile('^([a-f0-9]){12}$').match(mac) is not None
7c4ea0a3353a3753907de21bbf114b2a228bb3c0
706,680
import struct

def parse_monitor_message(msg):
    """decode zmq_monitor event messages.

    Parameters
    ----------
    msg : list(bytes)
        zmq multipart message that has arrived on a monitor PAIR socket.
        First frame is::

            16 bit event id
            32 bit event value
            no padding

        Second frame is the endpoint as a bytestring.

    Returns
    -------
    event : dict
        event description as dict with the keys `event`, `value`, and `endpoint`.
    """
    if len(msg) != 2 or len(msg[0]) != 6:
        raise RuntimeError("Invalid event message format: %s" % msg)
    event = {
        'event': struct.unpack("=hi", msg[0])[0],
        'value': struct.unpack("=hi", msg[0])[1],
        'endpoint': msg[1],
    }
    return event
df71541d34bc04b1ac25c6435b1b298394e27362
706,681
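A quick self-contained sanity check of the framing parse_monitor_message expects; the event id, value, and endpoint below are made-up example values, not real zmq traffic:

import struct

fake_msg = [struct.pack("=hi", 1, 32768), b"tcp://127.0.0.1:5555"]
print(parse_monitor_message(fake_msg))
# {'event': 1, 'value': 32768, 'endpoint': b'tcp://127.0.0.1:5555'}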
import argparse

def build_arg_parser():
    """Build the ArgumentParser."""
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument("-f", "--fritzbox", default="fritz.box")
    parser.add_argument("-u", "--username", default="dslf-config")
    parser.add_argument("-p", "--password", required=True)
    return parser
acf1baafdedfa8db7328e095eac5324f4ddae1ee
706,682
def bootstrap_alert(visitor, items):
    """
    Format: [[alert(class=error)]]: message
    """
    txt = []
    for x in items:
        cls = x['kwargs'].get('class', '')
        if cls:
            cls = 'alert-%s' % cls
        txt.append('<div class="alert %s">' % cls)
        if 'close' in x['kwargs']:
            txt.append('<button class="close" data-dismiss="alert">&times;</button>')
        text = visitor.parse_text(x['body'], 'article')
        txt.append(text)
        txt.append('</div>')
    return '\n'.join(txt)
c2803176b2e1ed9b3d4aecd622eedcac673d4c42
706,683
def aspectRatioFix(preserve, anchor, x, y, width, height, imWidth, imHeight):
    """This function helps position an image within a box.

    It first normalizes for two cases:
    - if the width is None, it assumes imWidth
    - ditto for height
    - if width or height is negative, it adjusts x or y and makes them positive

    Given (a) the enclosing box (defined by x,y,width,height where x,y is the
    lower left corner) which you wish to position the image in, (b) the image
    size (imWidth, imHeight), and (c) the 'anchor point' as a point of the
    compass - n,s,e,w,ne,se etc and c for centre - this should return the
    position at which the image should be drawn, as well as a scale factor
    indicating what scaling has happened. It returns the parameters which
    would be used to draw the image without any adjustments:

        x, y, width, height, scale

    used in canvas.drawImage and drawInlineImage.
    """
    scale = 1.0
    if width is None:
        width = imWidth
    if height is None:
        height = imHeight
    if width < 0:
        width = -width
        x -= width
    if height < 0:
        height = -height
        y -= height
    if preserve:
        imWidth = abs(imWidth)
        imHeight = abs(imHeight)
        scale = min(width / float(imWidth), height / float(imHeight))
        owidth = width
        oheight = height
        width = scale * imWidth - 1e-8
        height = scale * imHeight - 1e-8
        if anchor not in ('nw', 'w', 'sw'):
            dx = owidth - width
            if anchor in ('n', 'c', 's'):
                x += dx / 2.
            else:
                x += dx
        if anchor not in ('sw', 's', 'se'):
            dy = oheight - height
            if anchor in ('w', 'c', 'e'):
                y += dy / 2.
            else:
                y += dy
    return x, y, width, height, scale
73a686f122ad31ee6693641e1ef386f13b67b4d8
706,684
import math

def atan2(y, x):
    """Returns angle of a 2D coordinate in the XY plane"""
    return math.atan2(y, x)
ede5a647c175bebf2800c22d92e396deff6077e2
706,685
def get_column(data, column_index):
    """
    Gets a column of data from the given data.

    :param data: The data from the CSV file.
    :param column_index: The column to copy.
    :return: The column of data (as a list).
    """
    return [row[column_index] for row in data]
3fd5c8c76ccfed145aba0e685aa57ad01b3695a5
706,686
from typing import List
import logging

def get_vocab(iob2_files: List[str]) -> List[str]:
    """Retrieve the vocabulary of the iob2 annotated files

    Arguments:
        iob2_files {List[str]} -- List of paths to the iob2 annotated files

    Returns:
        List[str] -- Returns the unique list of vocabulary found in the files
    """
    vocab = set()
    for iob2_file in iob2_files:
        logging.info("Loading file %s for creating corpus embeddings", iob2_file)
        with open(iob2_file) as fh:  # close the handle once the file is read
            for line in fh:
                token = line.split("\t")[0]
                vocab.add(token)
    return list(vocab)
0dc2a1f969ed6f92b36b1b31875c855d5efda2d9
706,687
import numpy

def taylor_green_vortex(x, y, t, nu):
    """Return the solution of the Taylor-Green vortex at given time.

    Parameters
    ----------
    x : numpy.ndarray
        Gridline locations in the x direction as a 1D array of floats.
    y : numpy.ndarray
        Gridline locations in the y direction as a 1D array of floats.
    t : float
        Time value.
    nu : float
        Coefficient of viscosity.

    Returns
    -------
    numpy.ndarray
        x-component of the velocity field as a 2D array of floats.
    numpy.ndarray
        y-component of the velocity field as a 2D array of floats.
    numpy.ndarray
        pressure field as a 2D array of floats.
    """
    X, Y = numpy.meshgrid(x, y)
    a = 2 * numpy.pi
    u = -numpy.cos(a * X) * numpy.sin(a * Y) * numpy.exp(-2 * a**2 * nu * t)
    v = +numpy.sin(a * X) * numpy.cos(a * Y) * numpy.exp(-2 * a**2 * nu * t)
    p = (-0.25 * (numpy.cos(2 * a * X) + numpy.cos(2 * a * Y)) *
         numpy.exp(-4 * a**2 * nu * t))
    return u, v, p
f47f4cdf11b81fe8b8c38ae50d708ec4361f7098
706,688
import torch

def conj(x):
    """Calculate the complex conjugate of x.

    x is a two-channel complex torch tensor.
    """
    assert x.shape[-1] == 2
    return torch.stack((x[..., 0], -x[..., 1]), dim=-1)
b22cfd3f12759f9b237099ca0527f0cbe9b99348
706,689
def update_schema(schema_old, schema_new):
    """Given an old BigQuery schema, update it with a new one.

    Where a field name is the same, the new will replace the old. Any new
    fields not present in the old schema will be added.

    Arguments:
        schema_old: the old schema to update
        schema_new: the new schema which will overwrite/extend the old
    """
    old_fields = schema_old["fields"]
    new_fields = schema_new["fields"]
    output_fields = list(old_fields)

    field_indices = {field["name"]: i for i, field in enumerate(output_fields)}

    for field in new_fields:
        name = field["name"]
        if name in field_indices:
            # replace old field with new field of same name
            output_fields[field_indices[name]] = field
        else:
            # add new field
            output_fields.append(field)

    return {"fields": output_fields}
e97827ac0d8ee943b88fc54506af3f6fc8285d71
706,690
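A small worked example of update_schema; the field names and types below are illustrative, not from any real BigQuery table:

old = {"fields": [{"name": "id", "type": "INTEGER"},
                  {"name": "ts", "type": "STRING"}]}
new = {"fields": [{"name": "ts", "type": "TIMESTAMP"},
                  {"name": "user", "type": "STRING"}]}
print(update_schema(old, new))
# {'fields': [{'name': 'id', 'type': 'INTEGER'},
#             {'name': 'ts', 'type': 'TIMESTAMP'},
#             {'name': 'user', 'type': 'STRING'}]}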
def parse_flarelabels(label_file):
    """
    Parses a flare-label file and generates a dictionary mapping residue
    identifiers (e.g. A:ARG:123) to a user-specified label, trees that can be
    parsed by flareplots, and a color indicator for vertices.

    Parameters
    ----------
    label_file : file
        A flare-label file where each line contains 2-3 columns formatted as
          - CHAIN:RESN:RESI (e.g. A:ARG:123)
          - [[TOPLEVEL.]MIDLEVEL.]LABEL (e.g. Receptor.Helix2.2x44)
          - COLOR (e.g. #FF0000 or white)

    Returns
    -------
    dict of str : (dict of str : str)
        Keys are all residue identifiers and values are dicts that hold the
        LABEL by itself (key "label"), the full tree-path (key "treepath"),
        and a CSS-compatible color string (key "color").

    Raises
    ------
    AssertionError
        if a residue identifier (CHAIN:RESN:RESI) is specified twice in the
        file, or if a LABEL appears twice.
    """
    if label_file is None:
        return None

    ret = {}
    flarelabels = set()  # Only used to check for duplicates
    for line in label_file:
        line = line.strip()
        if not line:
            continue  # Ignore empty lines
        columns = line.split("\t")
        residentifier = columns[0]
        flaretreepath = columns[1] if len(columns) > 1 else columns[0]
        flarelabel = flaretreepath.split(".")[-1]
        flarecolor = columns[2] if len(columns) > 2 else "white"
        if residentifier in ret:
            raise AssertionError("Residue identifier '" + residentifier + "' appears twice in " + label_file.name)
        if flarelabel in flarelabels:
            raise AssertionError("Flare label '" + flarelabel + "' used twice in " + label_file.name)
        ret[residentifier] = {"label": flarelabel, "treepath": flaretreepath, "color": flarecolor}
        flarelabels.add(flarelabel)
    return ret
23df49af14af720311b320f65894e995983365bf
706,691
def get_hmm_datatype(query_file):
    """Takes an HMM file (HMMer3 software package) and determines what data
    type it has (i.e., generated from an amino acid or nucleic acid
    alignment). Returns either "prot" or "nucl".
    """
    datatype = None
    with open(query_file) as infh:
        for i in infh:
            if i.startswith('ALPH'):
                # Split on any whitespace so aligned header columns parse cleanly.
                dname = i.strip().split()[1]
                if dname == 'amino':
                    datatype = 'prot'
                elif dname == 'DNA':
                    datatype = 'nucl'
                break
    # Check that it worked.
    assert datatype is not None, \
        "Error: Data type could not be determined for input file: %s" % query_file
    # Return the data type.
    return datatype
27653784b8a9fbae92226f8ea7d7b6e2b647765e
706,692
def detect_min_threshold_outliers(series, threshold):
    """Detects the values that are lower than the threshold passed

    series : series, mandatory
        The series where to detect the outliers
    threshold : integer, float, mandatory
        The minimum value; anything below it will be considered an outlier.
    """
    bool_outliers = series < threshold
    return bool_outliers
6032693341073d101c0aad598a105f6cbc0ec578
706,693
def obtain_bboxs(path) -> list:
    """obtain bbox annotations from the file"""
    with open(path, "r") as file:
        lines = file.read().split("\n")
    lines = [x for x in lines if x and not x.startswith("%")]
    lines = [x.rstrip().lstrip() for x in lines]  # get rid of fringe whitespaces
    bboxs = []
    for line in lines:
        items = line.split(" ")
        bboxs.append([items[0], float(items[1]), float(items[2]),
                      float(items[3]), float(items[4])])
    return bboxs
75ceaac4bd8500320007d2ffb4cf4c490bd29473
706,694
import argparse

def parse_arguments():
    """Parse arguments"""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-i",
        type=str,
        dest="input_pics",
        help="A file consisting of pic paths, with each pic on a single line.",
    )
    parser.add_argument("-o", type=str, dest="output_gif", help="Output gif path.")
    parser.add_argument("-fps", type=float, dest="fps", help="FPS.")
    parser.add_argument(
        "-duration", type=float, dest="duration", help="Duration of each frame."
    )
    return parser.parse_args()
8956c690bfffbe2e93c40c98db0eb785ff440530
706,696
def return_next_entry_list_uri(links):
    """Return the endpoint of the next page of the blog entry list."""
    for link in links:
        if link.attrib["rel"] == "next":
            return link.attrib["href"]
0c4c4139270ef8dedbb106f2db852097f4cd3028
706,697
def none(**_):
    """
    Input:  anything
    Return: 0.0 (float)
    Descr.: Dummy method to handle no temperature correction
    """
    return 0.0
e06b22f91d5a73450ddb4ca53fbb2569d567dcf1
706,698
def get_custom_headers(manifest_resource):
    """Generates the X-TAXII-Date-Added headers based on a manifest resource"""
    headers = {}
    times = sorted(map(lambda x: x["date_added"], manifest_resource.get("objects", [])))
    if len(times) > 0:
        headers["X-TAXII-Date-Added-First"] = times[0]
        headers["X-TAXII-Date-Added-Last"] = times[-1]
    return headers
6c3acf2ea330b347387bfec574b4f8edfffa69ab
706,699
def checkCulling(errs, cullStrings):
    """
    Removes all messages containing sub-strings listed in cullStrings.
    cullStrings can be either a string or a list of strings. If a list of
    strings, each string must be a sub-string in a message for the message
    to be culled.
    """

    def checkCullingMatch(message, cullStrings):
        found = True
        for cullString in cullStrings:
            found = found and (cullString in message)
        return found

    def checkCulling2(message, cullStrings, level=0):
        if isinstance(message, list):
            messages = []
            for msg in message:
                msg1 = checkCulling2(msg, cullStrings, level + 1)
                if msg1 is not None:
                    messages.append(msg1)
            if len(messages) < 2:
                messages = None
            return messages
        else:
            if checkCullingMatch(message, cullStrings):
                return None
            return message

    if isinstance(cullStrings, str):
        cullStrings = [cullStrings]

    errs2 = []
    for err in errs:
        messages = []
        if isinstance(err.message, str):
            if not checkCullingMatch(err.message, cullStrings):
                errs2.append(err)
        else:
            for message in err.message:
                message = checkCulling2(message, cullStrings)
                if message is not None:
                    messages.append(message)
            if len(messages) > 0:
                err.message = messages
                errs2.append(err)
    return errs2
5414e52df999a8aef7ed34328a689efa1582aabb
706,700
import csv

def read_pinout_csv(csv_file, keyname="number"):
    """read a csv file and return a dict with the given keyname as the keys"""
    with open(csv_file) as fh:  # close the handle when done
        rows = list(csv.DictReader(fh))
    d = {}
    for item in rows:
        d[item[keyname]] = item
    return d
07a30b1191d311fee315c87773e3b3c1111d7624
706,701
def findall(element, path):
    """
    A helper function around :attr:`lxml.etree._Element.findall` that passes
    the element's namespace mapping.
    """
    return element.findall(path, namespaces=element.nsmap)
20da8cb66ac591751501e5c944f6f95235582e80
706,702
import os

def read_file(file_dir: str, filename: str) -> bytes:
    """Read file contents to bytes"""
    with open(os.path.join(file_dir, filename), "rb") as f:
        data = f.read()
    return data
b0e8207a25d6dd85fcd4701696d40146282a6095
706,703
import collections

def _parse_voc_xml(node):
    """Extracted from torchvision"""
    voc_dict = {}
    children = list(node)
    if children:
        def_dic = collections.defaultdict(list)
        for dc in map(_parse_voc_xml, children):
            for ind, v in dc.items():
                def_dic[ind].append(v)
        if node.tag == 'annotation':
            def_dic['object'] = [def_dic['object']]
        voc_dict = {
            node.tag: {ind: v[0] if len(v) == 1 else v for ind, v in def_dic.items()}
        }
    if node.text:
        text = node.text.strip()
        if not children:
            voc_dict[node.tag] = text
    return voc_dict
58ef998cdf36ce4620042736ff27fc06d4c277a6
706,704
def data_to_CCA(dic, CCA):
    """Returns a dictionary of ranking details of each CCA:
    {name: {placeholder: rank}}
    """
    final_dic = {}
    dic_CCA = dic[CCA][0]  # the cca sheet
    for key, value in dic_CCA.items():
        # delete all the useless info
        try:
            del value["Class"]
        except KeyError:
            del value["CLASS"]
        try:
            del value["Category"]
        except KeyError:
            pass
        final_dic[key] = value
    try:
        del final_dic["Name"]
    except KeyError:
        pass
    return final_dic
fedd8a55e4310c024ede4f474c89463f71a0ecb6
706,705
def cell_info_for_active_cells(self, porosity_model="MATRIX_MODEL"):
    """Get list of cell info objects for current case

    Arguments:
        porosity_model(str): String representing an enum.
            Must be 'MATRIX_MODEL' or 'FRACTURE_MODEL'.

    Returns:
        List of **CellInfo** objects

    **CellInfo class description**::

        Parameter                 | Description                                   | Type
        ------------------------- | --------------------------------------------- | -----
        grid_index                | Index to grid                                 | Integer
        parent_grid_index         | Index to parent grid                          | Integer
        coarsening_box_index      | Index to coarsening box                       | Integer
        local_ijk                 | Cell index in IJK directions of local grid    | Vec3i
        parent_ijk                | Cell index in IJK directions of parent grid   | Vec3i

    **Vec3i class description**::

        Parameter        | Description                                  | Type
        ---------------- | -------------------------------------------- | -----
        i                | I grid index                                 | Integer
        j                | J grid index                                 | Integer
        k                | K grid index                                 | Integer
    """
    active_cell_info_chunks = self.cell_info_for_active_cells_async(
        porosity_model=porosity_model
    )
    received_active_cells = []
    for active_cell_chunk in active_cell_info_chunks:
        for active_cell in active_cell_chunk.data:
            received_active_cells.append(active_cell)
    return received_active_cells
f2b211e72bc5c2f651d67fa383dc750b6b9f5c5a
706,706
import torch

def pick_action(action_distribution):
    """action selection by sampling from a multinomial.

    Parameters
    ----------
    action_distribution : 1d torch.tensor
        action distribution, pi(a|s)

    Returns
    -------
    torch.tensor(int)
        sampled action
    """
    m = torch.distributions.Categorical(action_distribution)
    a_t = m.sample()
    return a_t
ac7ceb0df860876ec209563eaa6bdd3f8bd09189
706,707
from typing import List

def word_tokenizer(text: str) -> List[str]:
    """Tokenize input text splitting into words

    Args:
        text : Input text

    Returns:
        Tokenized text
    """
    return text.split()
dc6e4736d7a1f564bcfc6fed081a1869db38eea5
706,708
import functools
import warnings
import traceback

def catch_exceptions(warning_msg="An exception was caught and ignored.", should_catch=True):
    """Decorator that catches exceptions."""
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            if not should_catch:
                return func(*args, **kwargs)
            try:
                return func(*args, **kwargs)
            except Exception:
                warnings.warn(warning_msg + "\nDetailed error log: " + traceback.format_exc())
        return wrapper
    return decorator
4242512d6416ecd97ef0c241d0d719fcdaedd797
706,709
def test_translate_six_frames(seq_record):
    """
    Given a Biopython sequence record with a DNA (or RNA?) sequence,
    translate into amino acid (protein) sequences in six frames.
    Returns translations as a list of strings.
    """
    translation_list = []
    for strand, nuc in [(+1, seq_record.seq),
                        (-1, seq_record.seq.reverse_complement())]:
        print("Strand: %s\nNuc: %s" % (strand, nuc))
        for frame in range(3):
            print("Frame: %s" % frame)
            length = 3 * ((len(seq_record) - frame) // 3)
            print("Length: %s" % length)
            print("Possible translations: %s" % nuc[frame:frame + length].translate())
            for pro in nuc[frame:frame + length].translate().split("*"):
                translation_list.append(pro)
    return translation_list
ce230ee2d8c48d55b269b89e828782456389fc39
706,710
def filter_params(params):
    """Filter the dictionary of params for a Bountysource account.

    This is so that the Bountysource access token doesn't float around in
    a user_info hash (considering nothing else does that).
    """
    whitelist = ['id', 'display_name', 'first_name', 'last_name', 'email',
                 'avatar_url']
    filtered_params = {}
    for key in params:
        if key in whitelist:
            filtered_params[key] = params[key]
    return filtered_params
d471ecfa413f6a6821202f14a2506a89e55353b2
706,711
def to_bits_string(value: int) -> str:
    """Converts unsigned value to a bit string with _ separators every nibble."""
    if value < 0:
        raise ValueError(f'Value is not unsigned: {value!r}')
    bits = bin(value)[2:]
    rev = bits[::-1]
    pieces = []
    i = 0
    while i < len(rev):
        pieces.append(rev[i:i + 4])
        i += 4
    return '0b' + '_'.join(pieces)[::-1]
07dea253378686a1c65c97fad3d0b706e02335c4
706,712
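Two example calls showing the nibble grouping of to_bits_string:

print(to_bits_string(5))     # 0b101
print(to_bits_string(1023))  # 0b11_1111_1111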
from collections import Counter

def remove_unpopulated_classes(_df, target_column, threshold):
    """
    Removes any row of the df for which the label in target_column appears
    less than threshold times in the whole frame (not enough populated classes)

    :param _df: The dataframe to filter
    :param target_column: The target column with labels
    :param threshold: the number of appearances a label must respect
    :return: The filtered dataframe
    """
    count = Counter(_df[target_column])
    valid = [k for k in count.keys() if count[k] >= threshold]
    _df = _df[_df[target_column].isin(valid)]
    return _df
2ed31cfd3883a3856501dabff935028824141181
706,713
def __factor(score, items_sum, item_count):
    """Helper method for the pearson correlation coefficient algorithm."""
    return score - items_sum / item_count
2f92b4a5be4375e3083ace9b3855a170f7372460
706,714
from pathlib import Path
import sys

def get_img_path() -> Path:
    """
    Gets the path of the Mac installation image from the command line
    arguments. Fails with an error if the argument is not present or the
    given file doesn't exist.
    """
    args = sys.argv
    if len(args) < 2:
        sys.exit(
            "Please provide the path to the Fuzzlecheck image as argument."
        )
    if len(args) > 2:
        sys.exit("More arguments provided than needed.")
    rsl = Path(args[1])
    if not rsl.exists():
        sys.exit("Given dmg image ({}) doesn't exist.".format(args[1]))
    return rsl
2ba4ac7f7954cbe2ed76824da41a13b8e6d0b4d9
706,715
def find_allergens(ingredients):
    """Return ingredients with corresponding allergen."""
    by_allergens_count = sorted(ingredients, key=lambda i: len(ingredients[i]))
    for ingredient in by_allergens_count:
        if len(ingredients[ingredient]) == 1:
            for other_ingredient, allergens in ingredients.items():
                if ingredient == other_ingredient:
                    continue
                ingredients[other_ingredient] = (allergens - ingredients[ingredient])
    return {
        ingredient: allergen.pop()
        for ingredient, allergen in ingredients.items()
    }
b5fde42cae0138f3bd819eb60629bb2d7ddf2f38
706,716
def check_all_flash(matrix_2d):
    """
    Check if all octopuses flashed.
    :param matrix_2d: 2D matrix
    :return: Boolean
    """
    for line in matrix_2d:
        for digit in line:
            if digit != 0:
                # A non-zero energy level means this octopus did not flash.
                return False
    return True
9dca0174cd0272773e9b9330977bd3fac86f413a
706,717
def cache_get(cache, key, fcn, force=False):
    """Get key from cache, or compute one."""
    if cache is None:
        cache = {}
    if force or (key not in cache):
        cache[key] = fcn()
    return cache[key]
b358bf01dc657d8cd983830d18ef5a85a48d69ec
706,718
def _BreadthFirstSearch(to_visit, children, visited_key=lambda x: x):
    """Runs breadth first search starting from the nodes in |to_visit|

    Args:
        to_visit: the starting nodes
        children: a function which takes a node and returns the nodes adjacent
            to it
        visited_key: a function for deduplicating node visits. Defaults to the
            identity function (lambda x: x)

    Returns:
        A list of nodes which are reachable from any node in |to_visit| by
        calling |children| any number of times.
    """
    to_visit = list(to_visit)
    seen = set(map(visited_key, to_visit))
    for node in to_visit:
        for child in children(node):
            key = visited_key(child)
            if key not in seen:
                seen.add(key)
                to_visit.append(child)
    return to_visit
1c7153f61af81bb4bd9a06e0213bfcee4aab5cb8
706,719
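A minimal sketch of _BreadthFirstSearch on a hand-built adjacency dict (the graph below is illustrative):

graph = {'a': ['b', 'c'], 'b': ['d'], 'c': [], 'd': []}
print(_BreadthFirstSearch(['a'], lambda n: graph[n]))
# ['a', 'b', 'c', 'd']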
def fileexists(filename):
    """Replacement method for os.stat."""
    try:
        f = open(filename, 'r')
        f.close()
        return True
    except OSError:
        pass
    return False
126460a04e7a8faf7517cb46c480670f5a067b1a
706,720
def test_knowledge_graph_init(graph_mutation_client, graph_mutation_responses):
    """Test knowledge graph client initialization."""
    return graph_mutation_client.named_types
932a425e4bbdc301ef223e0f91936ecacf3bd5aa
706,721
import pathlib

def stem(path: str) -> str:
    """returns the stem of a path (path without parent directory and without
    extension)

    e.g. j.sals.fs.stem("/tmp/tmp-5383p1GOmMOOwvfi.tpl") -> 'tmp-5383p1GOmMOOwvfi'

    Args:
        path (str): path we want to get its stem

    Returns:
        str: path without parent directory and without extension
    """
    return pathlib.Path(path).stem
ec7507becb31bda7662122668b490148ca15d347
706,722
def find_defender(ships, side):
    """Crude method to find something approximating the best target when attacking"""
    enemies = [x for x in ships if x['side'] != side and x['hp'] > 0]
    if not enemies:
        return None
    # shoot already wounded enemies first
    wounded = [x for x in enemies if x['hp'] < x['size']]
    if wounded:
        return ships.index(wounded[0])
    # shoot boarders in priority (?)
    boarding = [x for x in enemies if 'Boarding' in x['name']]
    if boarding:
        return ships.index(boarding[0])
    # shoot 1 hp ships
    hp_1 = [x for x in enemies if x['size'] == 1]
    if hp_1:
        return ships.index(hp_1[0])
    # shoot 2 hp ships
    hp_2 = [x for x in enemies if x['size'] == 2]
    if hp_2:
        return ships.index(hp_2[0])
    # otherwise just shoot the first one (??!)
    return ships.index(enemies[0])
4c58ec01abae1f59ced47e61e257abe7f8923aea
706,723
import os

def get_parameters(image_path):
    """
    Parses the image path to a dictionary

    :param str image_path: image path
    :rtype: dict
    """
    image_directory = os.path.dirname(image_path)
    image_filename = os.path.basename(image_path)
    image_name = image_filename.split('.')[0]
    image_extension = image_filename.split('.')[-1]
    return {
        'directory': image_directory,
        'extension': image_extension,
        'name': image_name,
        'filename': image_filename,
        'path': image_path
    }
cffef64001c81fd3459d49239b1ce863c546b6ca
706,724
def and_sum(phrase):
    """Returns TRUE iff every element in <phrase> is TRUE"""
    for x in phrase:
        if not x:
            return False
    return True
d65953c5811aedef0a7c76cd3191aba8236f02fa
706,725
from datetime import datetime

def get_stay(admission_date, exit_date):
    """Compute the length of stay between admission and exit dates."""
    try:
        if not exit_date:
            exit_date = datetime.now().date()
        no_days = exit_date - admission_date
        # Break the total duration down into years, months, and days.
        years = (no_days.total_seconds()) / (365.242 * 24 * 3600)
        years_int = int(years)
        months = (years - years_int) * 12
        months_int = int(months)
        days = (months - months_int) * (365.242 / 12)
        days_int = int(days)
        years_val = '' if years_int == 0 else '%s years ' % (years_int)
        mon_check = years_int > 0 and months_int > 0
        months_val = '%s months ' % (months_int) if mon_check else ''
        pds = '%s%s%s days' % (years_val, months_val, days_int)
    except Exception as e:
        print('Error calculating exit - %s' % str(e))
        return None
    else:
        return pds
bba3aa63884608500793c9f97c0c6e0e5d9e69ef
706,726
def get_index_freq(freqs, fmin, fmax):
    """Get the indices of the freqs between fmin and fmax in freqs"""
    f_index_min, f_index_max = -1, 0
    for freq in freqs:
        if freq <= fmin:
            f_index_min += 1
        if freq <= fmax:
            f_index_max += 1
    # Just check that the indices are not out of bounds
    f_index_max = min(len(freqs) - 1, f_index_max)
    f_index_min = max(0, f_index_min)
    return f_index_min, f_index_max
f3e014626d763f18ce6b661cabeb244bfabe9782
706,727
def divideByFirstColumn(matrix):
    """Normalize the matrix by dividing each row by its row sum, to resolve
    wrong intensity problems."""
    result = (matrix.T / matrix.sum(axis=1)).T
    return result
348bbaa1a3c16a42be90978a0fcc65b1a7daf557
706,728
import random

def superpixel_colors(
        num_pix: int = 1536,
        schema: str = 'rgb',
        interleave: int = 1,
        stroke: str = '',
) -> list:
    """
    Generate color (attribute) list for superpixel SVG paths

    Parameters
    ----------
    num_pix : int
        Number of super pixels to account for (default = 1536)
    schema : str
        Either of 'rgb' or 'random'
    interleave : int
        RGB interleave value (default = 1)
    stroke : str
        String that is inserted into every attribute at the end, e.g. to
        account for a stroke, such as 'stroke="#808080"'. Please note that
        the entire tag=value (pairs) must be given!

    Returns
    -------
    colors : list
        List of attributes suitable for superpixel_outlines (SVG)
    """
    colors = [''] * num_pix
    if schema not in ['random', 'rgb']:
        raise ValueError('invalid schema requested.')
    if schema == 'rgb':
        if stroke:
            for idx in range(num_pix):
                val = interleave * idx
                colors[idx] = 'fill="#{0:02x}{1:02x}{2:02x}" {3:s}'.format(
                    val % 256, (val // 256) % 256, (val // 65536) % 256, stroke)
        else:
            for idx in range(num_pix):
                val = interleave * idx
                colors[idx] = 'fill="#{0:02x}{1:02x}{2:02x}"'.format(
                    val % 256, (val // 256) % 256, (val // 65536) % 256)
    else:
        if stroke:
            for idx in range(num_pix):
                # Keep the stroke attribute outside the fill's quotes.
                colors[idx] = 'fill="#{0:06x}" {1:s}'.format(
                    random.randrange(16777216), stroke)
        else:
            for idx in range(num_pix):
                colors[idx] = 'fill="#{0:06x}"'.format(
                    random.randrange(16777216))
    return colors
7a574b48dff30126052c2acd5d06e01a9f4a9af0
706,729
import argparse

def _get_args():
    """Parses the command line arguments and returns them."""
    parser = argparse.ArgumentParser(description=__doc__)
    # Argument for the mode of execution (human or random):
    parser.add_argument(
        "--mode",
        "-m",
        type=str,
        default="human",
        choices=["human", "random"],
        help="The execution mode for the game.",
    )
    return parser.parse_args()
fdccca9d6ba518d7b3c1732070667b0b82018fc5
706,730
import importlib

def write(*args, package="gw", file_format="dat", **kwargs):
    """Write a results file.

    Parameters
    ----------
    args: tuple
        all args are passed to the write function
    package: str
        the package you wish to use
    file_format: str
        the file format you wish to use. Default "dat".
    kwargs: dict
        all kwargs passed to the write function
    """
    def _import(package, file_format):
        """Import format module with importlib"""
        return importlib.import_module(
            "pesummary.{}.file.formats.{}".format(package, file_format)
        )

    def _write(module, file_format, args, kwargs):
        """Execute the write method"""
        return getattr(module, "write_{}".format(file_format))(*args, **kwargs)

    if file_format == "h5":
        file_format = "hdf5"
    try:
        module = _import(package, file_format)
        return _write(module, file_format, args, kwargs)
    except (ImportError, AttributeError, ModuleNotFoundError):
        module = _import("core", file_format)
        return _write(module, file_format, args, kwargs)
b7246e035f13b60fc8047abd768fae4bb1937600
706,732
import csv

def compute_list(commandline_argument):
    """Return a list of booking or revenue data, reading the data file named
    by the first command line parameter."""
    # Open booking CSV and read everything into memory
    with open(commandline_argument, "r", encoding="shift_jis") as database:
        data = csv.reader(database)
        next(data)  # skip the header row
        list_data = list(data)
    return list_data
115725f5ab35c04412a2fe4f982a72c6b2e4c297
706,733
import codecs

def debom(s):
    """Remove BOM characters from a string.

    BOM characters appear at the start of a file and can corrupt a CSV
    header or produce mojibake; stripping them here avoids that.
    """
    if isinstance(s, bytes):
        boms = [getattr(codecs, k) for k in dir(codecs) if k.startswith('BOM')]
        for bom in boms:
            s = s.replace(bom, b'')
        return s
    # In decoded text, a BOM surfaces as U+FEFF.
    return s.replace('\ufeff', '')
4ac056a8ba93f00a0a31e3a447e62810c5b68687
706,734
def load_sentences(filename):
    """give us a list of sentences where each sentence is a list of tokens.
    Assumes the input file is one sentence per line, pre-tokenized."""
    out = []
    with open(filename) as infile:
        for line in infile:
            line = line.strip()
            tokens = line.split()
            out.append(tokens)
    return out
6a4c458f9a0d9b17eaa38c38570dacc4c40e86c0
706,735
import sys

def get_internal_modules(key='exa'):
    """
    Get a list of modules belonging to the given package.

    Args:
        key (str): Package or library name (e.g. "exa")
    """
    key += '.'
    return [v for k, v in sys.modules.items() if k.startswith(key)]
d97618ba37ad403a74fc13a7587c6369fab540fb
706,736
def precision_and_recall_at_k(ground_truth, prediction, k=-1):
    """
    :param ground_truth: list of relevant items
    :param prediction: ranked list of predicted items
    :param k: how far down the ranked list we look; set to -1 (default) to
        use all of the predictions
    :return: precision and recall at k
    """
    if k == -1:
        k = len(prediction)
    prediction = prediction[0:k]
    numer = len(set(ground_truth).intersection(set(prediction)))
    prec = numer / k
    recall = numer / len(ground_truth)
    return prec, recall
cf8543279c6d7874f99c5badeb3064b621fa36a4
706,737
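A worked example of precision_and_recall_at_k; with k=2 only the top two predictions are scored:

prec, rec = precision_and_recall_at_k([1, 2, 3], [2, 4, 1, 5], k=2)
print(prec, rec)  # 0.5 0.3333333333333333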
def tar_cat(tar, path):
    """Reads file and returns content as bytes"""
    mem = tar.getmember(path)
    with tar.extractfile(mem) as f:
        return f.read()
f07f00156c34bd60eea7fcae5d923ea9f1650f6f
706,738
def __get_base_name(input_path):
    """
    /foo/bar/test/folder/image_label.ext --> test/folder/image_label.ext
    """
    return '/'.join(input_path.split('/')[-3:])
5df2ef909f4b570cf6b6224031ad705d16ffff42
706,739
def or_ipf28(xpath):
    """change xpath to match ipf <2.8 or >2.9 (for noise range)"""
    xpath28 = xpath.replace('noiseRange', 'noise').replace('noiseAzimuth', 'noise')
    if xpath28 != xpath:
        xpath += " | %s" % xpath28
    return xpath
7bf508c48d5a6fc09edba340e2bfc9ec13513fc8
706,740
def unf_gas_density_kgm3(t_K, p_MPaa, gamma_gas, z):
    """
    Equation for gas density

    :param t_K: temperature, K
    :param p_MPaa: pressure, MPa (absolute)
    :param gamma_gas: specific gas density by air
    :param z: z-factor
    :return: gas density, kg/m3
    """
    m = gamma_gas * 0.029  # molar mass, kg/mol (air = 0.029)
    p_Pa = 10 ** 6 * p_MPaa
    rho_gas = p_Pa * m / (z * 8.31 * t_K)
    return rho_gas
6e41802367bbe70ab505ae5db89ee3e9a32e7d7c
706,741
import os

def availible_files(path: str, contains: str = '') -> list:
    """Returns the available files in a directory

    Args:
        path (str): Path to directory
        contains (str, optional): Substring the file names must contain
            (default value = '')

    Returns:
        list: The matching file names
    """
    return [f for f in os.listdir(path) if contains in f]
c43db03d06d849c017382daee9715c8dde91b61d
706,742
def scale(value, upper, lower, min_, max_):
    """Scales value between upper and lower values, depending on the given
    minimum and maximum value.
    """
    numerator = (lower - upper) * float(value - min_)
    denominator = float(max_ - min_)
    return numerator / denominator + upper
3e13c80b765cffb1e75a6856d343bd9a88c353e9
706,743
def Flatten(nmap_list):
    """Flattens every `.NestedMap` in nmap_list and concatenates them."""
    ret = []
    for x in nmap_list:
        ret += x.Flatten()
    return ret
c630869b725d69338830e1a14ef920d6d1e87ade
706,744
def left_index_iter(shape):
    """Iterator for the left boundary indices of a structured grid."""
    return range(0, shape[0] * shape[1], shape[1])
c7da6f5de48d0446cb0729593d3dc0eb95f5ab9a
706,745
def add_numbers(a, b):
    """Sums the given numbers.

    :param int a: The first number.
    :param int b: The second number.
    :return: The sum of the given numbers.

    >>> add_numbers(1, 2)
    3
    >>> add_numbers(50, -8)
    42
    """
    return a + b
7d9a0c26618a2aee5a8bbff6a65e315c33594fde
706,746
def area(a, indices=(0, 1, 2, 3)):
    """
    :param a: array whose last axis holds box coordinates
    :param indices: positions of (x0, y0, x1, y1) within that axis
    :return: width * height for each box
    """
    x0, y0, x1, y1 = indices
    return (a[..., x1] - a[..., x0]) * (a[..., y1] - a[..., y0])
17df4d4f4ad818be0b2ed7a1fe65aaeccbe63638
706,747
def convert_acl_to_iam_policy(acl):
    """Converts the legacy ACL format to an IAM Policy proto."""
    owners = acl.get('owners', [])
    readers = acl.get('readers', [])
    if acl.get('all_users_can_read', False):
        readers.append('allUsers')
    writers = acl.get('writers', [])
    bindings = []
    if owners:
        bindings.append({'role': 'roles/owner', 'members': owners})
    if readers:
        bindings.append({'role': 'roles/viewer', 'members': readers})
    if writers:
        bindings.append({'role': 'roles/editor', 'members': writers})
    return {'bindings': bindings}
990cdb6a51a696cf2b7825af94cf4265b2229be9
706,748
def default_to(default, value):
    """
    Ramda implementation of default_to

    Returns value unless it is None or NaN, in which case default is
    returned; falsy values such as 0 or '' are kept, matching Ramda's
    defaultTo semantics.

    :param default:
    :param value:
    :return:
    """
    if value is None:
        return default
    if isinstance(value, float) and value != value:  # NaN check
        return default
    return value
58338f67332a0ff116cd2ff46d65ee92bf59c360
706,749
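Two calls illustrating the None-only defaulting above (falsy values pass through):

print(default_to(42, None))  # 42
print(default_to(42, 0))     # 0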
import re

def convert_to_snake_case(string: str) -> str:
    """Helper function to convert column names into snake case.

    Takes a string of any sort and makes conversions to snake case, replacing
    double-underscores with single underscores."""
    s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', string)
    draft = re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
    return draft.replace('__', '_')
2a8de69a6915e87e46582a1af7a7897ff6fd97ce
706,750
def list_keys(client, keys):
    """
    :param client: datastore client
    :param keys: list of candidate keys
    :return: the fetched objects if any of the keys exist, None otherwise
    """
    objects = client.get_multi(keys)
    if bool(objects):
        return objects
    else:
        return None
4370053b76ea526e1f43309112f85f968ce76b6b
706,751
from typing import List, Union
import os
import warnings

def gather_simulation_file_paths(in_folder: str, filePrefix: str = "",
                                 fileSuffixes: Union[str, List[str]] = [".tre", ".tre.tar.gz"],
                                 files_per_folder: int = 1,
                                 verbose: bool = False) -> List[str]:
    """gather_simulation_file_paths
    find energy trajectory files in a folder

    Parameters
    ----------
    in_folder : str
        directory where the files should be searched
    filePrefix : str, optional
        prefix of the file name pattern (default "")
    fileSuffixes : Union[str, List[str]]
        suffixes of the file name pattern (default [".tre", ".tre.tar.gz"])
    files_per_folder : int, optional
        number of files per folder (default 1)
    verbose : bool, optional
        verbose output (default False)

    Returns
    -------
    List[str]
        list of sorted files
    """
    files = []
    if isinstance(fileSuffixes, str):
        fileSuffixes = [fileSuffixes]

    if verbose:
        print("SEARCH PATTERN: " + filePrefix + " + * + " + str(fileSuffixes))

    for dirname, dirnames, filenames in os.walk(in_folder):
        # skip equilibration folders (e.g. "eq...1")
        if str(dirname[-1]).isdigit() and os.path.basename(dirname).startswith("eq"):
            continue

        # check actual in_dir for file pattern
        tmp_files = [file for file in filenames
                     if (filePrefix in file
                         and any([suffix in file for suffix in fileSuffixes]))]
        if len(tmp_files) == files_per_folder:
            files.extend(list(map(lambda x: dirname + "/" + x, tmp_files)))

        if verbose:
            print("walking to in_dir: ", os.path.basename(dirname), "found: ", len(tmp_files))

    try:
        keys = [[int(y) for y in x.split("_") if y.isdecimal()][-1] for x in files]
        sorted_files = list(map(lambda y: y[1], sorted(zip(keys, files), key=lambda x: x[0])))
    except (IndexError, ValueError):
        warnings.warn("Files are not all enumerated! no file sorting.")
        sorted_files = files

    if verbose:
        print("\nfoundFiles:\n")
        print("\t" + "\n\t".join(sorted_files))

    if len(sorted_files) == 0:
        raise ValueError("could not find any file with the prefix: " + filePrefix + " in folder : \n" + in_folder)

    return sorted_files
1f5473b147a16dfc6cb2f5101d264f45161c8e6b
706,752
import re

def replace_whitespace(s, rep=' '):
    """Replace any length white spaces in the given string with a replacement.

    Parameters
    ----------
    s : str
        The string in which any length whitespaces should be replaced.
    rep : Optional[str]
        The string with which all whitespace should be replaced. By default,
        the plain ASCII space ( ) is used.

    Returns
    -------
    str
        The string in which whitespaces have been replaced.
    """
    s = re.sub(r'\s+', rep, s)
    return s
b583a627dda830275822f6276af33b58afb55f1e
706,753
def fix_mocov2_state_dict(state_dict):
    """
    Ref: https://bit.ly/3cDfGVA
    """
    new_state_dict = {}
    for k, v in state_dict.items():
        if k.startswith("model.encoder_q."):
            # keep only the query-encoder weights, with the prefix stripped
            k = k.replace("model.encoder_q.", "")
            new_state_dict[k] = v
    return new_state_dict
13471d6863eb14eb3248f6d6e1d6b5882c341ed0
706,754
def WHo_mt(dist, sigma):
    """
    Speed Accuracy model for generating finger movement time.

    :param dist: euclidean distance between points.
    :param sigma: speed-accuracy trade-off variance.
    :return: mt: movement time.
    """
    x0 = 0.092
    y0 = 0.0018
    alpha = 0.6
    x_min = 0.006
    x_max = 0.06
    k_alpha = 0.12
    if dist == 0:
        dist = 0.0000001
    mt = pow((k_alpha * pow(((sigma - y0) / dist), (alpha - 1))), 1 / alpha) + x0
    return mt
36d8b7e913df658b52f1f03617d0b9817091d0ef
706,755
def find_next_sibling_position(element, tag_type):
    """
    Gets the current element's next sibling's (chosen by provided tag_type)
    actual character position in the html document

    :param element: Whose sibling to look for, type: An object of class bs4.Tag
    :param tag_type: sibling tag's type (e.g. p, h2, div, span etc.), type: A string
    :return: An Integer specifying character pos. in html, infinite when no sibling is found
    """
    nxt_sib = element.find_next_sibling(tag_type)
    return float("inf") if nxt_sib is None else nxt_sib.sourcepos
9b912fd9b7d30e81d6b4c2fec0e0573017b51a83
706,756
import subprocess

def CheckOutput(cmd, **kwargs):
    """Call subprocess.check_output to get output.

    The subprocess.check_output return type is "bytes" in python 3, so we
    have to convert the bytes to a string with .decode().

    Args:
        cmd: String of command.
        **kwargs: dictionary of keyword based args to pass to func.

    Return:
        String of command output.
    """
    return subprocess.check_output(cmd, **kwargs).decode()
b4eb9ac552124c56f76c0c684c2d515558307aa4
706,757
def chunks(l, n):
    """Split list in chunks - useful for controlling memory usage"""
    if n < 1:
        n = 1
    return [l[i:i + n] for i in range(0, len(l), n)]
d878aeb50bd42c9f5a2060f4bb2747aecb1a3b58
706,758
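Example of chunks splitting a list into fixed-size pieces, with the remainder in the last chunk:

print(chunks([1, 2, 3, 4, 5], 2))  # [[1, 2], [3, 4], [5]]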
def FilterKeptAttachments(
        is_description, kept_attachments, comments, approval_id):
    """Filter kept attachments to be a subset of last description's attachments.

    Args:
        is_description: bool, if the comment is a change to the issue description.
        kept_attachments: list of ints with the attachment ids for attachments
            kept from previous descriptions, if the comment is a change to the
            issue description.
        comments: list of IssueComment PBs for the issue we want to edit.
        approval_id: int id of the APPROVAL_TYPE fielddef, if we're editing an
            approval description, or None otherwise.

    Returns:
        A list of kept_attachment ids that are a subset of the last description.
    """
    if not is_description:
        return None

    attachment_ids = set()
    for comment in reversed(comments):
        if comment.is_description and comment.approval_id == approval_id:
            attachment_ids = set([a.attachment_id for a in comment.attachments])
            break

    kept_attachments = [
        aid for aid in kept_attachments if aid in attachment_ids]
    return kept_attachments
89732832db557835a5dea1ef10229bfdd809d304
706,759
import os

def scan_fixtures(path):
    """Scan for fixture files on the given path.

    :param path: The path to scan.
    :type path: str

    :rtype: list
    :returns: A list of three-element tuples; the app name, file name, and
              relative path.
    """
    results = list()
    for root, dirs, files in os.walk(path):
        relative_path = root.replace(path + "/", "")
        if relative_path.startswith("static") or relative_path.startswith("theme"):
            continue
        for f in files:
            if not f.endswith(".json"):
                continue
            app_name = os.path.basename(os.path.dirname(relative_path))
            results.append((app_name, f, relative_path))
    return results
c85e8281a2f9005feb1801083138b55cb5079cf6
706,760
import logging

def lookup_cpe(vendor, product, cpe_type, cpe_table, remap):
    """Identify the correct vendor and product values for a CPE

    This function attempts to determine the correct CPE using vendor and
    product values supplied by the caller as well as a remapping dictionary
    for mapping these values to more correct values used by NIST.

    For example, the remapping might tell us that a value of 'alpine' for the
    vendor string should be 'alpinelinux' instead, or for product 'solaris'
    should be 'sunos'.

    This function should only emit values seen in the official NIST CPE list
    which is provided to it in cpe_table.

    Lookup priority:
        1. Original vendor / product
        2. Original vendor / remap product
        3. Remap vendor / original product
        4. Remap vendor / remap product

    Args:
        vendor (str): vendor name
        product (str): product name
        cpe_type (str): CPE type - o, a, h, etc.
        cpe_table (dict): dict containing the official NIST CPE data
        remap (dict): dict containing the remapping values

    Returns:
        success, vendor, product
    """
    if (
        vendor in cpe_table[cpe_type]
        and product in cpe_table[cpe_type][vendor]
    ):
        # Hot path, success with original values
        return True, vendor, product

    # Everything else depends on a remap of some sort.
    # get the remappings for this one vendor string.
    vendor_remap = remap.get(vendor, None)
    if vendor_remap:
        # If we have product remappings, work that angle next
        possible_product = None
        if (
            vendor_remap.get('products', None)
            and product in vendor_remap['products']
        ):
            possible_product = vendor_remap['products'][product]

        if (vendor in cpe_table[cpe_type]
                and possible_product
                and possible_product in cpe_table[cpe_type][vendor]):
            # Found original vendor, remap product
            return True, vendor, possible_product

        # Start working the process to find a match with a remapped vendor name
        if vendor_remap.get('vendor', None):
            new_vendor = vendor_remap['vendor']
            if new_vendor in cpe_table[cpe_type]:
                if product in cpe_table[cpe_type][new_vendor]:
                    # Found remap vendor, original product
                    return True, new_vendor, product
                if possible_product and possible_product in cpe_table[cpe_type][new_vendor]:
                    # Found remap vendor, remap product
                    return True, new_vendor, possible_product

    logging.error("Product %s from vendor %s invalid for CPE %s and no mapping",
                  product, vendor, cpe_type)
    return False, None, None
5a6e2e735daa50d3d2a19022db002ebfc647335c
706,761
def main():
    """main function of git learning"""
    return 'Google git'
a7296a18657643188ef58131fe012df6543f808e
706,762
def fiveplates_clean_design_file(field, designID):
    """string representation of targets_clean file for field within
    fiveplates_field_files zip file.

    Parameters
    ----------
    field : str
        identifier of field, e.g. 'GG_010'
    designID : int or str
        design identifier embedded in the file name
    """
    return f'{field}_des{designID}_targets_clean.txt'
c6e5c60ad08aa3e4162700f3d48e58d35a57486e
706,763
def formalize_rules(list_rules):
    """
    Takes a list of rules where facts are separated by commas.
    Returns a string with the rules in a convenient form (with 'If' and
    'Then' words, etc.).
    """
    text = ''
    for r in list_rules:
        t = [i for i in r.split(',') if i]
        text += 'If %s,\n' % t[0]
        for i in t[1:-1]:
            text += '    %s,\n' % i
        text += 'Then: %s.\n' % t[-1]
    return text
d8fbb024f38ae097efa42f95efe6b5d3b5adbd71
706,764
import os
import subprocess

def find(name, environment=None, guess=None):
    """Finds a particular binary on this system.

    Attempts to find the binary given by ``name``, first checking the value
    of the environment variable named ``environment`` (if provided), then by
    checking the system path, then finally checking hardcoded paths in
    ``guess`` (if provided). This function is cross-platform compatible - it
    works on Windows, Linux, and Mac. If there are spaces in the path found,
    this function will wrap its return value in double quotes.

    Args:
        name (str): Binary name.
        environment (str): An optional environment variable to check.
        guess (iterable): An optional list of hardcoded paths to check.

    Returns:
        A string with the absolute path to the binary if found, otherwise
        ``None``.
    """

    def sanitize(path):
        quotes = ('"', "'")  # leave already-quoted paths untouched
        if " " in path and path[0] not in quotes and path[-1] not in quotes:
            path = '"{}"'.format(path)
        return path

    if environment:
        path = os.environ.get(environment)
        if path is not None:
            path = os.path.abspath(os.path.expanduser(path))
            if os.path.isfile(path):
                return sanitize(path)

    if os.name == "posix":
        search = "which"
    elif os.name == "nt":
        search = "where.exe"
    else:
        raise EnvironmentError("unknown platform: {}".format(os.name))

    try:
        with open(os.devnull, "w") as output:
            path = subprocess.check_output([search, name], stderr=output).decode(
                "utf-8"
            )
            return sanitize(os.path.abspath(path.strip()))
    except subprocess.CalledProcessError:
        pass

    if guess:
        for path in guess:
            if os.path.isfile(path):
                return sanitize(path)

    return None
d3f8d4375804dc54e0187b6b3f8ab53b2120acd7
706,765
import os

def get_lib_ver(library_path=""):
    """Returns the version of the Minipresto library.

    ### Parameters
    - `library_path`: The Minipresto library directory."""
    version_file = os.path.join(library_path, "version")
    try:
        with open(version_file, "r") as f:
            for line in f:
                line = line.strip()
                if line:
                    return line
            return "NOT FOUND"
    except OSError:
        return "NOT FOUND"
e42b029762ca8e6baee12062464134f13ae71522
706,766
import yaml

def load_yaml_config(path):
    """returns the config parsed based on the info in the flags.

    Grabs the config file, written in yaml, slurps it in.
    """
    with open(path) as f:
        config = yaml.load(f, Loader=yaml.FullLoader)
    return config
0ee100a6e4d25881f8b8ab4ced723f600e878e28
706,767
import argparse

def setup_args():
    """Setup and return the command line argument parser"""
    parser = argparse.ArgumentParser(description='')
    # parser.add_argument('csv', type=str, help='CSV file to load')
    parser.add_argument(
        '-clang-tidy-binary',
        help='Path to the clang-tidy executable.',
        metavar='PATH',
        required=True)
    parser.add_argument(
        '-clang-apply-replacements-binary',
        help='Path to the clang-apply-replacements binary. Required when using -fix and -runner-py'
             ' arguments.')
    parser.add_argument(
        '-runner-py',
        help='Python script wrapping clang-tidy with support for multiple jobs. run-clang-tidy.py ships'
             ' with clang-tidy. Without this clang-tidy is run directly.',
        metavar='PATH')
    parser.add_argument(
        '-fix', action='store_true',
        help='Apply automatic fixes. Passes -fix to clang-tidy. When using -runner-py'
             ' (run-clang-tidy.py), the argument -clang-apply-replacements-binary must also be set to the'
             ' clang-apply-fixes binary.')
    parser.add_argument(
        '-config-file',
        help='clang-tidy configuration file. Extracted and passed as the -config argument to'
             ' clang-tidy.')
    parser.add_argument(
        '-p',
        help='clang-tidy build path (path to compile_commands.json). Extracted and passed as the -p'
             ' argument to clang-tidy.',
        required=False)
    parser.add_argument(
        '-j',
        help='Number of parallel jobs to run. Only supported when using the -runner-py script. Ignored'
             ' otherwise.',
        required=False)
    parser.add_argument(
        '-relative-to',
        help='Modify clang-tidy message paths to be relative to this directory. Intended for CI'
             ' builds to report portable paths.',
        required=False)
    return parser
477da4faf063a461a77791f372f50e0e105b8ac7
706,768
import requests

def isLinkValid(test_video_link):
    """Check whether the given YouTube video link is valid."""
    try:
        data = requests.get(
            "https://www.youtube.com/oembed?format=json&url=" + test_video_link
        ).json()
        return data != "Not Found"
    except Exception:
        return False
0af4f8c1d05f2b98d046d63d5eaf39f679a37818
706,769
def freenas_spec(**kwargs):
    """FreeNAS specs."""
    # Setup vars from kwargs
    builder_spec = kwargs['data']['builder_spec']
    bootstrap_cfg = None
    builder_spec.update(
        {
            'boot_command': [
                '<enter>',
                '<wait30>1<enter>',
                'y',
                '<wait5><spacebar>o<enter>',
                '<enter>',
                '{{ user `password` }}<tab>{{ user `password` }}<tab><enter>',
                '<enter>',
                '<wait60><wait60><wait60>',
                '<enter>',
                '3<enter>',
                '<wait60><wait60><wait60><wait60><wait60>',
                '9<enter>',
                "curl -X PUT -u {{ user `username` }}:{{ user `password` }} -H 'Content-Type: application/json' -d '{\"ssh_rootlogin\": true}' http://localhost/api/v1.0/services/ssh/<enter>",  # noqa: E501
                "curl -X PUT -u {{ user `username` }}:{{ user `password` }} -H 'Content-Type: application/json' -d '{\"srv_enable\": true}' http://localhost/api/v1.0/services/services/ssh/<enter>"  # noqa: E501
            ],
            'boot_wait': '30s',
            'shutdown_command': 'shutdown -p now',
        }
    )
    return bootstrap_cfg, builder_spec
ffe666fd48b6d545e44389ae0413bc1f0c29c44e
706,770
import os

def _check_resource(resource_path: str) -> bool:
    """
    Checks if the resource is a file and accessible, or checks that all
    resources in a directory are files and accessible

    :param resource_path: A path to the resource
    :return: True if resource is OK to upload, False otherwise
    """
    if os.path.isfile(resource_path):
        try:
            with open(resource_path, 'rb'):
                return True
        except (PermissionError, FileNotFoundError):
            return False
    return True
39f8109054367fe2c7f3f5dc61b24564f81160d7
706,772
def calc_precision(output, target):
    """calculate precision from tensor(b,c,x,y) for every category c"""
    precs = []
    for c in range(target.size(1)):
        # Pixels predicted 1 that match the target (true positives).
        true_positives = ((output[:, c] - (output[:, c] != 1).int()) == target[:, c]).int().sum().item()
        # Pixels predicted 1 where the target is not 1 (false positives).
        false_positives = ((output[:, c] - (output[:, c] != 1).int()) == (target[:, c] != 1).int()).int().sum().item()
        if true_positives == 0:
            precs.append(1.0)
        else:
            precs.append(true_positives / (true_positives + false_positives))
    return precs
c35c500c786539578c46a8e8c4f6517bf30b4525
706,773
import uuid

def get_unique_id():
    """for unique random docname

    :return: length 32 string
    """
    _id = str(uuid.uuid4()).replace("-", "")
    return _id
4cf99a919bd0e9672f0b186626df0532cacebaf4
706,774
def verify_count_responses(responses):
    """
    Verifies that the responses given are well formed.

    Parameters
    ----------
    responses : int OR list-like
        If an int, the exact number of responses targeted.
        If list-like, the first two elements are the minimum and maximum
        (inclusive) range of responses targeted. If a third item is in the
        list it must be a list of values from which the range of target
        responses is being restricted.

    Returns
    -------
    responses : list
        The validated responses, wrapped in a list if an int was given.
    """
    if isinstance(responses, int):
        responses = [responses]
    elif isinstance(responses, (list, tuple)):
        if len(responses) not in [2, 3]:
            raise IndexError(
                "The responses list given to has_count() must have "
                "either 2 or 3 items in the form: "
                "[min, max, [values subset]]. Found %s." % (responses)
            )
        valid_types = [int, int, (list, tuple)]
        for r, response in enumerate(responses):
            if not isinstance(response, valid_types[r]):
                raise TypeError(
                    "The responses list given to has_count() has "
                    "incorrectly typed items. It must be either 2 or 3 "
                    "items in the form: [int, int, list/tuple]. "
                    "Found %s." % (responses)
                )
            if r == 2:
                # the optional third item is a subset of int values
                for value in response:
                    if not isinstance(value, int):
                        raise TypeError(
                            "The values subset given as the third item "
                            "in has_count(responses) is not correctly "
                            "typed. Each value must be int. "
                            "Found %s." % (response)
                        )
    return responses
63fbd00bc26fee8eb960f389d5d56178e90ff7ae
706,775
def _subtract(supernet, subnets, subnet_idx, ranges):
    """Calculate IPSet([supernet]) - IPSet(subnets).

    Assumptions: subnets is sorted, subnet_idx points to the first element in
    subnets that is a subnet of supernet.

    Results are appended to the ranges parameter as tuples in the format
    (version, first, last). Return value is the first subnet_idx that does
    not point to a subnet of supernet (or len(subnets) if all subsequent
    items are a subnet of supernet).
    """
    version = supernet._module.version
    subnet = subnets[subnet_idx]
    if subnet.first > supernet.first:
        ranges.append((version, supernet.first, subnet.first - 1))

    subnet_idx += 1
    prev_subnet = subnet
    while subnet_idx < len(subnets):
        cur_subnet = subnets[subnet_idx]
        if cur_subnet not in supernet:
            break
        if prev_subnet.last + 1 == cur_subnet.first:
            # two adjacent, non-mergable IPNetworks
            pass
        else:
            ranges.append((version, prev_subnet.last + 1, cur_subnet.first - 1))
        subnet_idx += 1
        prev_subnet = cur_subnet

    first = prev_subnet.last + 1
    last = supernet.last
    if first <= last:
        ranges.append((version, first, last))

    return subnet_idx
a7c738b5ddab1ed896677a011029a00af5779bcd
706,776
def strip_spectral_type(series, return_mask=False):
    """
    Strip spectral type from series of strings

    Args:
        series (pd.Series): series of object names (strings)
        return_mask (bool): returns boolean mask True where there is a type

    Returns:
        no_type (pd.Series): series without spectral types
        type_mask (pd.Series): boolean mask where type is given
    """
    type_mask = series.str.match('\\([OBAFGKM]\\)')
    no_type = series.copy()
    no_type[type_mask] = series[type_mask].str.slice(start=4)
    return (no_type, type_mask) if return_mask else no_type
65b91749742b229637819582b1158554b1a457ea
706,777
def expand_groups(node_id, groups):
    """
    node_id: a node ID that may be a group
    groups: store group IDs and list of sub-ids
    return value: a list that contains all group IDs deconvoluted
    """
    node_list = []
    if node_id in groups.keys():
        for component_id in groups[node_id]:
            node_list.extend(expand_groups(component_id, groups))
    else:
        node_list.extend([node_id])
    return node_list
4c4b9c569a85396f201c589635b6ecea3807ddc2
706,778
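A small illustrative example of expand_groups with one nested group:

groups = {'g1': ['n1', 'g2'], 'g2': ['n2', 'n3']}
print(expand_groups('g1', groups))  # ['n1', 'n2', 'n3']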
import torch

def gram_matrix(image: torch.Tensor):
    """https://pytorch.org/tutorials/advanced/neural_style_tutorial.html#style-loss"""
    n, c, h, w = image.shape
    x = image.view(n * c, w * h)
    gram_m = torch.mm(x, x.t()).div(n * c * w * h)
    return gram_m
5912cfec026cba26a77131c3b52a8e751c0f575e
706,779
def sw_update_opts_w_name_db_model_to_dict(sw_update_opts, subcloud_name):
    """Convert sw update options db model plus subcloud name to dictionary."""
    result = {"id": sw_update_opts.id,
              "name": subcloud_name,
              "subcloud-id": sw_update_opts.subcloud_id,
              "storage-apply-type": sw_update_opts.storage_apply_type,
              "compute-apply-type": sw_update_opts.compute_apply_type,
              "max-parallel-computes": sw_update_opts.max_parallel_computes,
              "alarm-restriction-type": sw_update_opts.alarm_restriction_type,
              "default-instance-action": sw_update_opts.default_instance_action,
              "created-at": sw_update_opts.created_at,
              "updated-at": sw_update_opts.updated_at}
    return result
c9c1703d9e4d0b69920d3ab06e5bf19fbb622103
706,780