content
stringlengths
35
416k
sha1
stringlengths
40
40
id
int64
0
710k
def count_ontarget_samples(df, human_readable=False):
    """Count usable (on-target) samples per step/target/participant.

    Parameters
    ----------
    df : DataFrame
        Must contain "step", "target", "participant" and a boolean
        "ontarget" column.
    human_readable : Boolean, optional
        default=False

    Returns
    -------
    ontarget_counts : DataFrame
        MultiIndexed if human_readable, otherwise "step" by "participant"
    """
    # Use the boolean column directly as a mask ("== True" is redundant)
    # and select rows+columns in one .loc call instead of chained indexing.
    ontarget_counts = df.loc[
        df["ontarget"], ["step", "target", "participant", "ontarget"]
    ].groupby(
        ["step", "target", "participant"]
    ).count().unstack(fill_value=0)
    if human_readable:
        return ontarget_counts
    # Flatten for programmatic use: drop "target" from the row index and
    # the constant "ontarget" level from the columns.
    ontarget_counts.set_index(
        ontarget_counts.index.droplevel("target"), inplace=True
    )
    ontarget_counts.columns = ontarget_counts.columns.droplevel()
    return ontarget_counts
3bb2532017089ab08ac53422baaa55a5b38ee4e3
703,214
def convertToMapPic(byteString, mapWidth):
    """Convert a bytestring into a 2D row x column array, representing an
    existing map of fog-of-war, creep, etc.

    Accepts either ``bytes`` (iteration yields ints in Python 3) or ``str``
    (iteration yields 1-char strings); each byte is rendered as its
    decimal value. A trailing partial row (len not divisible by mapWidth)
    is dropped, matching the original behavior.
    """
    data = []
    line = ""
    for idx, char in enumerate(byteString):
        # In Python 3 iterating bytes gives ints, so calling ord() on them
        # raised TypeError; handle both element types.
        line += str(char) if isinstance(char, int) else str(ord(char))
        if (idx + 1) % mapWidth == 0:
            data.append(line)
            line = ""
    return data
f6d78db10efc041cb55208f5428c99c25bd5ab5d
703,215
import csv

def _read_file_to_dict(path):
    """
    Load the problems and the corresponding labels from the *.txt file.

    :param path: The full path to the file to read
    :return: The dictionary with the problem names as keys and the true
        class labels as values
    """
    with open(path, 'r', encoding='utf-8-sig') as truth_file:
        rows = csv.reader(truth_file, delimiter=' ')
        return {row[0]: row[1] for row in rows}
83bd3b04afc995176dc4dfefb9863b9f1ba09888
703,216
import os
import lzma
import gzip

def zopen(filename, mode):
    """Open filename.xz, filename.gz or filename, preferring xz, then gz,
    then the plain file."""
    name = str(filename)
    xz_name = name if name.endswith(".xz") else name + ".xz"
    gz_name = name if name.endswith(".gz") else name + ".gz"
    if os.path.exists(xz_name):
        return lzma.open(xz_name, mode)
    if os.path.exists(gz_name):
        return gzip.open(gz_name, mode)
    return open(filename, mode)
d0a0c6221b9c73d5e13d6eaa84c321a6d332720b
703,217
def trace(fn):
    """Decorator that flags *fn* for tracing by setting ``should_trace``."""
    setattr(fn, "should_trace", True)
    return fn
598d81b2f4050b78cd42c835c5ce3bcc41c87541
703,218
import os
import json

def _conf():
    """Try to load a local ``conf.json`` sitting next to this module.

    Returns the parsed dict, or None when the file does not exist.
    """
    path = os.path.join(os.path.dirname(__file__), "conf.json")
    if not os.path.exists(path):
        return None
    with open(path) as handle:
        return json.load(handle)
bdc4376e9fd6b5721cba54d48d07d12ab907223c
703,219
def get_key(key):
    """Return *key* with every '-' character removed.

    :param: key - Color key
    """
    return ''.join(key.split('-'))
62aa5a9c08994ced2ec0c5da283d408685d8f583
703,220
import csv

def readPlumes(filename, logger=None):
    """Read plumes (plume time and lat/lon rows) from *filename* and
    return them as a list of dicts, one per CSV row."""
    if logger is not None:
        logger.info("reading {}".format(filename))
    with open(filename, 'rt') as fin:
        reader = csv.DictReader(fin, skipinitialspace=True)
        plumes = [row for row in reader]
    return plumes
2bf6ee36807e970b5180f7075fa1b1e70493bb5d
703,221
def get_sm_tag_from_alignedseg(aln):
    """Get 'sm' tag from AlignedSegment.

    :raises ValueError: when the tag is missing or unreadable; the
        original exception is chained (``from e``) so the root cause is
        not lost — the caught exception was previously bound but unused.
    """
    try:
        return aln.get_tag('sm')
    except Exception as e:
        raise ValueError("Could not get 'sm' tag from {aln}".format(aln=aln)) from e
ca23604f724f75bf4c399374547f1468c6c5df9b
703,222
def service2(backends_mapping, custom_service, service_settings2, service_proxy_settings, lifecycle_hooks):
    """Second service fixture.

    A separate service is required because deleting active docs must be
    tested in isolation from the primary service.
    """
    return custom_service(
        service_settings2,
        service_proxy_settings,
        backends_mapping,
        hooks=lifecycle_hooks,
    )
eca7cfe0869f051aa03494bfd9ec0e0083c856c0
703,223
import argparse

def parse_arguments(args_to_parse):
    """ Parse the command line arguments.

    Arguments:
        args_to_parse: CLI arguments to parse (passing None falls back to
            sys.argv, matching argparse's default behavior)
    """
    parser = argparse.ArgumentParser(
        description='Split the two CTM files (*.stm) (alignment files) into the respective lattice file directories'
    )
    parser.add_argument(
        '-o', '--output-dir', type=str, required=True,
        help='Destination directory for the split output files'
    )
    parser.add_argument(
        '-dev', '--dev-ctm', type=str, required=True,
        help='Path to the file containing the dev set train.stm alignment'
    )
    parser.add_argument(
        '-eval', '--eval-ctm', type=str, required=True,
        help='Path to the file containing the eval set train.stm alignment'
    )
    # BUG FIX: the original called parse_args() with no arguments, so the
    # args_to_parse parameter was silently ignored and sys.argv was always
    # parsed instead.
    args = parser.parse_args(args_to_parse)
    return args
978b83b96ecbbd562c4f675bfb64b32c5e624246
703,224
def get_fixed_length_string(string: str, length=20) -> str:
    """Return *string* forced to exactly *length* characters: longer
    strings are truncated with a trailing '...', shorter ones padded with
    spaces on the right."""
    if len(string) > length:
        truncated = string[: length - 3]
        return f"{truncated}..."
    padding = " " * (length - len(string))
    return f"{string}{padding}"
e77f3c7ed72efc3b86d378fa6cf9bde4eae95647
703,225
import numpy

def V3(meanFalse, meanTrue, sample):
    """NMC distance metric.

    Scores a sample by taking the point halfway between meanFalse and
    meanTrue and computing the cosine of the angle
    {sample, halfway, meanTrue}: samples toward meanTrue score near +1,
    samples toward meanFalse score near -1.
    """
    halfway = 0.5 * (meanFalse + meanTrue)
    to_sample = sample - halfway
    to_true = meanTrue - halfway
    denom = numpy.linalg.norm(to_sample) * numpy.linalg.norm(to_true)
    return numpy.inner(to_sample, to_true) / denom
ad94501d57a24ff07b2dd21bc4965ea495f0f7c7
703,226
def specific_gravity(temp, salinity, pressure):
    """Compute seawater specific gravity.

    sg = C(p) + beta(p)*S - alpha(T,p)*T - gamma(T,p)*(35 - S)*T
    units: p in km, S in psu, T in deg C

    C = 999.83 + 5.053p - .048p^2
    beta = .808 - .0085p
    alpha = .0708(1 + .351p + .068(1 - .0683p)T)
    gamma = .003(1 - .059p - .012(1 - .064p)T)

    For 30 <= S <= 40, -2 <= T <= 30, p <= 6 km: good to .16 kg/m3
    For 0 <= S <= 40, good to .3 kg/m3
    """
    p, s, t = pressure, salinity, temp
    c_term = 999.83 + 5.053 * p - 0.048 * p * p
    beta = 0.808 - 0.0085 * p
    alpha = 0.0708 * (1.0 + 0.351 * p + 0.068 * (1 - 0.0683 * p) * t)
    gamma = 0.003 * (1.0 - 0.059 * p - 0.012 * (1.0 - 0.064 * p) * t)
    return c_term + beta * s - alpha * t - gamma * (35 - s) * t
37ee32d3842cd5f9645449b23feb4d8315536fe2
703,227
def record(MyRecord, db):
    """Create and return a ``MyRecord`` instance titled 'test'.

    The ``db`` argument is accepted (fixture-style) only so the database
    is set up before the record is created; it is not used directly.
    """
    payload = {'title': 'test'}
    return MyRecord.create(payload)
f4216400ceddaf415fbad97d74e8f55b35835511
703,228
def reciprocal_mod(input_x, input_m):
    """Return the modular inverse of ``input_x`` modulo ``input_m``.

    Based on a simplification of the extended Euclidean algorithm.

    :param input_x: value to invert, 0 <= input_x < input_m
    :param input_m: the modulus
    :raises ValueError: when gcd(input_x, input_m) != 1 (no inverse).
    """
    assert 0 <= input_x < input_m
    r_prev, r_curr = input_m, input_x
    coeff_prev, coeff_curr = 0, 1
    while r_curr != 0:
        quotient = r_prev // r_curr
        coeff_prev, coeff_curr = coeff_curr, coeff_prev - quotient * coeff_curr
        r_prev, r_curr = r_curr, r_prev % r_curr
    if r_prev == 1:
        return coeff_prev % input_m
    raise ValueError("Reciprocal does not exist")
2d35399fbd84509012600efd1c5663b123cb9b2a
703,229
def np(self):
    """Return a numpy 2D array of the object's coordinates (and ids for
    line and polygon); XY coordinates are always placed last.

    Note
    ----
    x: x-coordinate, y: y-coordinate, lid: line id, pid: polygon id,
    cid: collection id

    Output
    ------
    ndarray shapes:
        Point            (npoint, 2): [[x, y]]
        LineString       (npoint, 3): [[lid, x, y]]
        Polygon          (npoint, 4): [[pid, lid, x, y]]
        MultiPoint       (npoint, 3): [[x, y]]
        MultiLineString  (npoint, 4): [[cid, lid, x, y]]
        MultiPolygon     (npoint, 5): [[cid, pid, lid, x, y]]
    """
    # Delegate to the internal implementation.
    array = self._np()
    return array
3c636f0c34676af38d65ca1527e868b070f7e57b
703,230
from typing import Optional
import os

def ensure_cpu_count(use_threads: bool = True) -> int:
    """Get the number of cpu cores to be used.

    Note
    ----
    When ``use_threads=True`` the count comes from ``os.cpu_count()``;
    otherwise a single core is used.

    Parameters
    ----------
    use_threads : bool
        True to enable multi-core utilization, False to disable.

    Returns
    -------
    int
        Number of cpu cores to be used.
    """
    if use_threads is not True:
        return 1
    detected: Optional[int] = os.cpu_count()
    if detected is None:
        return 1
    return max(detected, 1)
31e00fd7d5a9e7c91cdee97adb0cfa95a4679ee9
703,231
def find_dom_root(parent_dom_node, dom_node):
    """Walk up from *dom_node* and return the ancestor whose parent is
    *parent_dom_node*, or None when either argument is None or
    *parent_dom_node* is not on the ancestor chain.

    .. seealso:: :meth:`find_placeable_dom_tree_roots`
    """
    if parent_dom_node is None:
        return None
    node = dom_node
    while node is not None:
        parent = node.getparent()
        if parent == parent_dom_node:
            return node
        if parent is None:
            return None
        node = parent
    return None
51cab59b4e07655277166281e8290fc9eee0e7be
703,232
import os
def pathCorrectCase(path):
    """
    Return a normalized, case-corrected file path for the given path.

    Walks the path component by component; when a component does not
    exist with the given casing, the directory is scanned for a
    case-insensitive match. A leading '~' expands to the home directory
    and a leading '.' to the current working directory; '.' and '..'
    segments are resolved along the way.

    Raises Exception when no case-insensitive match exists for a
    component.
    """
    # Fast path: the path already exists with the given casing.
    if os.path.exists(path):
        return path
    # Normalize Windows separators, then split into components.
    parts = path.replace("\\", "/").split('/');
    if parts[0] == '~':
        newpath = os.path.expanduser('~')
    elif parts[0] == ".":
        newpath = os.getcwd()
    else:
        # Otherwise treat the path as rooted at '/'.
        newpath = '/'+parts[0]
    for i in range(1,len(parts)):
        if parts[i] == "." or parts[i] == "":
            # a dot is the same as the current directory
            # newpath does not need to be changed
            continue
        elif parts[i] == "..":
            # change to the parent directory
            if newpath !="/":
                newpath = os.path.split(newpath)[0]
            continue
        # test that the given part is a valid file or folder
        testpath = os.path.join(newpath,parts[i])
        if os.path.exists(testpath):
            newpath = testpath;
        else:
            # scan the directory for files with the
            # same name, ignoring case.
            temp = parts[i].lower();
            for item in os.listdir(newpath):
                if item.lower() == temp:
                    newpath = os.path.join(newpath,item)
                    break;
            else:
                # No case-insensitive match either: report and give up.
                print(path)
                raise Exception('Path `%s/%s` not found'%(newpath,temp))
    return newpath
ab8b825d899c28c17b292311c2a1cef284bb00c6
703,233
import subprocess

def _safe_call(cmd_list):
    """Run *cmd_list* via ``subprocess.check_output``.

    Returns True on success; on failure prints a clear error message and
    returns False (it does not raise or exit).
    """
    try:
        subprocess.check_output(cmd_list)
        return True
    except subprocess.CalledProcessError as err_thrown:
        # BUG FIX: the original used print('..."%s"', err_thrown.cmd) —
        # logging-style lazy args don't work with print(), so the literal
        # "%s" appeared in the output. Apply %-formatting explicitly.
        print('Error while calling "%s"' % (err_thrown.cmd,))
        return False
5bf517b5f0d5bd05b30f269dd75ea9217aeff5d4
703,234
def reverse_bits(num):
    """Reverse the bit pattern of *num*, treated as a 32-bit value.

    :param num: a number treated as being 32 bits long
    :return: the bit-reversed number
    """
    result = 0
    for shift in range(32):
        # Mirror bit `shift` into position `31 - shift`.
        if num & (1 << shift):
            result |= 1 << (31 - shift)
    return result
262e589cf366065018a57cd6a6c443bdd8eb638e
703,235
def calculate_total_profit(df):
    """
    1. Computes the final (total) profit.

    NOTE(review): the original doc said the dataframe needs a
    '<DEAL_RESULT>' column, but the code reads '<PERFORMANCE>' — confirm
    which column name is intended.

    :param df: dataframe with a '<PERFORMANCE>' column
    :return: final result of applying the strategy (the last non-NaN
        value of '<PERFORMANCE>')
    """
    return df.dropna()['<PERFORMANCE>'].values[-1]
918e200d276dbd3c630b5bdcb11cf771f36950e5
703,236
def getMatrixListFromPoint(point):
    """Build a flattened 4x4 identity transform whose translation row is
    taken from *point*.

    Args:
        point (MPoint)

    Returns:
        list
    """
    matrix = [
        1, 0, 0, 0,
        0, 1, 0, 0,
        0, 0, 1, 0,
        point.x, point.y, point.z, 1,
    ]
    return matrix
70a032f17fb9da468fa6011569fc42717008d8ec
703,237
def receberInt(msg = '\tDigite um número inteiro: '):
    """
    -> Validates keyboard input of an integer. If the input is invalid,
    a new input is requested.

    :param msg: message printed as the input prompt
    :return: the integer received
    """
    while True:
        resp = input(msg).strip()
        # str.isnumeric() only accepts digit characters, so signs and
        # decimals are rejected and the user is re-prompted.
        if resp.isnumeric():
            num = int(resp)
            break
        else:
            print('\t!!! Resposta Inválida !!!')
    return num
2140bb872ac8acc73e0148129344f4a4e91e69a0
703,238
import requests

def convert_using_api(from_currency, to_currency):
    """Convert *from_currency* to *to_currency* by querying the free
    currencyconverterapi.com HTTP API."""
    pair_key = from_currency + '_' + to_currency
    params = {'compact': 'ultra', 'q': pair_key}
    api_url = 'https://free.currencyconverterapi.com/api/v5/convert'
    response = requests.get(api_url, params=params)
    return response.json()[pair_key]
f261dcf6c97a8e5697e6b1005513b34f755f541f
703,239
import hashlib

def sha224(msg):
    """
    :return: BitString of the hash (the raw SHA-224 digest bytes of *msg*)
    """
    hasher = hashlib.sha224(msg)
    return hasher.digest()
1339e988a0f26bd4068112b12e312cdf7df38586
703,240
def get_mouseState():
    """Get the current mouse activation state for the i3 status bar.

    Reads ~/.config/i3/mousestate (hard-coded path) and returns an i3bar
    block dict colored by state, "" for an unknown value, or None when
    the file is empty (preserving the original fall-through).
    """
    with open('/home/noah/.config/i3/mousestate') as fp:
        arr = fp.readlines()
    if len(arr) > 0:
        # BUG FIX: readlines() keeps the trailing newline, so the original
        # comparisons against "0"/"1" never matched when the file ended
        # with '\n'; strip before comparing.
        state = arr[0].strip()
        if state == "0":
            return {'full_text' : '%s' % "", 'name' : 'mousestate', "color": "#888888"}
        elif state == "1":
            return {'full_text' : '%s' % "", 'name' : 'mousestate', "color": "#00ff00"}
        else:
            return ""
8ee7d2644bbc3ec7fbdec50a4c98e1f508626841
703,241
def prefix_as_comment(comment_prefix, text):
    """Insert *comment_prefix* after every newline in *text*, so that
    continuation lines of a comment stay commented."""
    separator = '\n' + comment_prefix
    return separator.join(text.split('\n'))
805f8e6260435a558b70e24f6692fc2a7dc6e764
703,242
import os

def list_all_measurement_stations(json_dir):
    """Returns a list of the GIC measurement stations available.

    Parameters:
    -----------
    json_dir :: str
        Directory containing json files for all stations with measurements.

    Returns:
    --------
    all_st :: list
        List of all stations in json_dir (file names with the trailing
        ".json" extension removed).
    """
    files = os.listdir(json_dir)
    json_files = [x for x in files if '.json' in x]
    all_st = []
    for json_file in json_files:
        # BUG FIX: str.strip(".json") strips any leading/trailing chars
        # from the set {., j, s, o, n} — e.g. "json1.json" became "1".
        # Cut the extension explicitly instead.
        if json_file.endswith(".json"):
            all_st.append(json_file[:-len(".json")])
        else:
            all_st.append(json_file)
    return all_st
2df2e7aada147bef31588c11d2c59ea18e2fd32f
703,243
import re

def remove_brackets(s):
    """Remove bracketed text — ``(...)`` or ``[...]`` — from *s*.

    Note: the match is greedy, so everything from the first opening
    bracket to the last closing bracket is removed.
    """
    # BUG FIX: the original class was [\(\(]...[\)\)], which duplicated
    # '(' / ')' and never matched square brackets despite the docstring.
    return re.sub(r'[([].*[)\]]', '', s)
82685dfa66c2b1185a3e106f7289af5856c8e56e
703,244
def calcAbsolutePercentageError(actualResult, forecastResult):
    """Calculate the Absolute Percentage Error.

    Returns float.
    """
    relative_error = (actualResult - forecastResult) / actualResult
    return abs(relative_error) * 100
6212f368477ece2a6f602ab8cdc865eeedafc864
703,245
def match_hyperparameter(hp, parameters):
    """Find the entry of *parameters* that equals *hp*.

    NOTE(review): despite the original doc speaking of a "partial"
    hyperparameter name, the comparison is exact equality — confirm
    whether substring matching was intended.

    Raises ValueError unless exactly one entry matches.
    """
    matches = [par for par in parameters if par == hp]
    if len(matches) != 1:
        raise ValueError('{} matches found for hyperparameter {}. Must be exactly 1'.format(len(matches), hp))
    return matches[0]
edf4d19638ee077d5dcf903db0b1ff1325a20fb0
703,246
def can_embed_image(repo, fname):
    """True if we can embed image file in HTML, False otherwise."""
    if not repo.info.embed_images:
        return False
    if "." not in fname:
        return False
    extension = fname.split(".")[-1].lower()
    return extension in ["jpg", "jpeg", "png", "gif"]
40bfdd8c32ddd5f3d3bd2ae074494ba34e6fc1f1
703,247
def get_modal_triggers(offend_atoms, implied_modalities):
    """Collect antecedents of modal implications whose implied modal atom
    is offending at the given world.

    :param offend_atoms: set of offending modal atoms at given w
    :param implied_modalities: set of tuples representing implied boxes
        and implied diamonds
    :return: set of antecedent atoms in modal implications
    """
    return {
        imp[1]
        for imp in implied_modalities
        for atom in offend_atoms
        if atom == imp[0][1]
    }
fb98cfba81a12ee0c0c466ceb601929da959fc84
703,248
def datetime_to_isoformat(dt):
    """Convert a Python datetime to an ISO 8601 representation.

    :param dt: A Python :class:~`datetime.datetime`.
    :return: ISO 8601 string; a trailing '+00:00' UTC offset is rewritten
        as the shorthand 'Z'.
    """
    iso = dt.isoformat()
    utc_suffix = '+00:00'
    if iso.endswith(utc_suffix):
        iso = iso[:-len(utc_suffix)] + 'Z'
    return iso
508ce4ea3e0905aab0b16c6b28fa4e9304e18b08
703,249
import os

def test_aws_availability():
    """Probe whether AWS S3 (the stpubdata bucket) is reachable.

    Returns 'cp' when the `aws s3 ls` probe succeeds (as of late October
    2018, 's3 sync' was not working with 'stpubdata'), or False so the
    caller falls back to fetching from the ESA archive.
    """
    command = 'aws s3 ls s3://stpubdata --request-payer requester > /tmp/aws.x'
    if os.system(command) == 0:
        return 'cp'
    return False
889eb45d3e7556b6ffe80258cc96e12b24c79ccd
703,250
def parse_commamd(cmd):
    """Parse a command provided on the command line.

    Args:
        cmd: The command, e.g. `paste` or `type:hello`

    Returns:
        a tuple of (command, data), where data is everything after the
        first ':' (empty when there is none).
    """
    command, _, data = cmd.partition(":")
    return (command, data)
c1a40f1508cb568e3a2ebf5b82f96baf81108fe1
703,251
import json

def read_dataset_json(path):
    """Read playlists from a dataset json file.

    Parameters:
    - path - absolute path of the file
    """
    with open(path, "r") as handle:
        dataset = json.load(handle)
    return dataset["playlists"]
06b5e6b6d07c549ed459d9567efd316f6412c13b
703,252
def pad_string(data, size, padding_character=' ', direction='left'):
    """Pad *data* with *padding_character* up to *size* characters.

    :param direction: 'left' pads on the left (right-justifies), 'right'
        pads on the right (left-justifies).
    :return: the padded string; unchanged when already >= size or the
        direction is unrecognized.
    """
    # BUG FIX: the original discarded the rjust/ljust return values
    # (strings are immutable) and tested `data == 'right'` instead of
    # `direction == 'right'`, so it always returned the input unchanged.
    if direction == 'left':
        data = data.rjust(size, padding_character)
    elif direction == 'right':
        data = data.ljust(size, padding_character)
    return data
4a53a448e965c2ea1227360edca774567c4797a4
703,253
def disttar_suffix(env, sources):
    """tar archive suffix generator.

    Returns ".tar.gz" / ".tar.bz2" when DISTTAR_FORMAT is set to a
    supported compression, otherwise plain ".tar".
    """
    env_dict = env.Dictionary()
    # BUG FIX: dict.has_key() was removed in Python 3; use `in` via .get().
    if env_dict.get("DISTTAR_FORMAT") in ("gz", "bz2"):
        return ".tar." + env_dict["DISTTAR_FORMAT"]
    return ".tar"
ef0b5378d3efaae68edb4c5cbaa5541c21f82a55
703,254
def _data_qubit_parity(q: complex) -> bool:
    """Checkerboard split of data qubits (real part paired in twos plus
    imaginary part, mod 2) used to interleave operations optimally."""
    column_pair = q.real // 2
    return bool((column_pair + q.imag) % 2)
92f5b9288eb5a009befd7c99c7e7f67f589100a6
703,255
def get_traits_by_germplasm(germplasmId):  # noqa: E501
    """Returns all phenotypes for a germplasm that we have  # noqa: E501

    :param germplasmId: Unique database ID for the germplasm
    :type germplasmId: str

    :rtype: List[Phenotype]
    """
    # Generated-server stub: not implemented yet; the placeholder string
    # is part of the scaffold contract.
    return 'do some magic!'
8fd66bd49b0276217e6422b42f5f8fa6115df060
703,256
import re

def get_hosts(r_config, r_dest):
    """Collect hosts from every group whose name matches *r_dest*.

    :param r_config: host-group configuration mapping [dict]
    :param r_dest: target host-group pattern, regex-matched against the
        start of each group name [str]
    :return: [list]
    """
    hosts = []
    # Accumulate in dict iteration order, same as scanning keys() once.
    for group_name in r_config.keys():
        if re.match(r_dest, group_name):
            hosts += r_config.get(group_name)
    return hosts
dd4becccdc8ad64cd59a7936a6a4e14f3d515123
703,257
def get_mode_from_params(params):
    """Returns the mode in which this script is running.

    Args:
        params: Params tuple, typically created by make_params or
            make_params_from_flags.

    Raises:
        ValueError: Unsupported params settings.
    """
    if params.forward_only and params.eval:
        raise ValueError('Only one of forward_only and eval parameters is true')
    if params.eval:
        return 'evaluation'
    return 'forward-only' if params.forward_only else 'training'
35564684eef73adf821989dea27bfdc7de0443ae
703,258
def quote_logvalue(value):
    """Return a value formatted for use in a logfmt log entry.

    The input is quoted (with inner quotes escaped) if it contains spaces
    or quotes; otherwise it is returned unchanged.
    """
    text = str(value)
    needs_quoting = " " in text or '"' in text
    if not needs_quoting:
        return text
    escaped = text.replace('"', '\\"')
    return f'"{escaped}"'
15dd0789b5a7ce4e18eece37ad0cac59d9cd2332
703,259
def flatten(master):
    """
    :param dict master: a multilevel dictionary
    :return: a flattened dictionary
    :rtype: dict

    Flattens a multilevel dictionary into a single-level one so that::

        {'foo': {'bar': {
            'a': 1,
            'b': True,
            'c': 'hello',
        }}}

    would become::

        {'foo.bar.a': 1,
         'foo.bar.b': True,
         'foo.bar.c': 'hello',
        }

    You can mix and match both input (hierarchical) and output (dotted)
    formats in the input without problems - and if you call flatten more
    than once, it has no effect.

    :raises ValueError: when two inputs flatten to the same dotted key.
    """
    # NOTE: the original also defined a never-called `add` helper (dead
    # code); it has been removed. The docstring example previously listed
    # 'foo.bar.a' twice — fixed to show all three keys.
    result = {}

    def recurse(value, *keys):
        # keys accumulate innermost-first, so reverse before joining.
        if isinstance(value, dict):
            for k, v in value.items():
                recurse(v, k, *keys)
        else:
            key = '.'.join(reversed(keys))
            if key in result:
                raise ValueError('Duplicate key %s' % str(keys))
            result[key] = value

    recurse(master)
    return result
d31325219e43ee5c047c1a78589d94e2d7c62709
703,260
def m21_midievent_to_event(midievent):
    """Convert a music21 MidiEvent to a tuple of MIDI bytes.

    The status byte folds the 1-based channel into the event's data byte.
    """
    status_byte = midievent.data + midievent.channel - 1
    return (status_byte, midievent.pitch, midievent.velocity)
3950b4e6715ac4de2dbdcc2d87d5cf51387a220c
703,261
from typing import Container
from typing import Sequence

def power_set_str_v2(s: str) -> Container[Sequence]:
    """Return all subsets (subsequences) of *s* as strings.

    Note: it doesn't take the empty set into account.
    """

    def _collect(prefix: str, rest: str, out) -> None:
        # For each position: emit prefix+rest[0], then branch on keeping
        # or skipping rest[0] for the remaining characters.
        if rest:
            out.append(prefix + rest[0])
            _collect(prefix + rest[0], rest[1:], out)
            _collect(prefix, rest[1:], out)

    subsets = []
    _collect("", s, subsets)
    return subsets
119a9e6118298f0d05ed3b14f43788e5d1c7ba49
703,262
import copy

def merge_dict(d1, d2, overwrite=False):
    """Merge contents of d1 and d2 and return the merged dictionary.

    Note:
      * The dictionaries d1 and d2 are unaltered.
      * If `overwrite=False` (default), a `RuntimeError` will be raised
        when duplicate keys exist, else any existing keys in d1 are
        silently overwritten by d2.
    """
    # Note: May partially be replaced by a ChainMap as of python 3.3
    if overwrite is False:
        shared = set(d1.keys()) & set(d2.keys())
        if len(shared) > 0:
            msg = "Dictionaries to merge have overlapping keys: %s"
            raise RuntimeError(msg % shared)
    merged = copy.deepcopy(d1)
    merged.update(d2)
    return merged
d680dcc3039804c340fc488a488fae1d891a8d1b
703,263
import os

def cancel_study(args):
    """Flag a study to be cancelled by touching a .cancel.lock file.

    Returns 0 on success, 1 when args.directory is not a directory.
    """
    if not os.path.isdir(args.directory):
        return 1
    lock_path = os.path.join(args.directory, ".cancel.lock")
    # Append mode preserves an existing lock file; utime refreshes its
    # timestamp either way.
    with open(lock_path, 'a'):
        os.utime(lock_path, None)
    return 0
68401bff4cb5bdfda554cf6301e517e8ca0f452b
703,265
import re
def remove_repeating_characters(sentence):
    """
    Collapse runs of a repeated non-alphanumeric character into a single
    occurrence, then strip surrounding whitespace.

    NOTE(review): the regex replaces a character repeated 4 or more times
    with ONE occurrence (e.g. '-----' -> '-'); the original comment
    claimed three were kept ('-----' -> '---') — confirm which behavior
    is intended.

    :param sentence: input text
    :return: cleaned text
    """
    # (\W)\1{3,}: a non-word character followed by 3+ copies of itself,
    # replaced by the single captured character.
    sentence = re.sub('(\W)\\1{3,}', '\\1', sentence)
    return sentence.strip()
9bf8e53c3fed78b2a8cd4c91a6a68f980c270654
703,266
import argparse

def get_args() -> argparse.Namespace:
    """Parse the Zenfolio downloader's command-line arguments."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument("-u", "--username", required=True, type=str,
                            help="Zenfolio username")
    arg_parser.add_argument("-p", "--password", required=True, type=str,
                            help="Zenfolio password")
    arg_parser.add_argument("-b", "--base-path", default="photos",
                            help="root directory to store downloaded photos")
    arg_parser.add_argument("-t", "--timeout", type=int, default=30,
                            help="Download request timeout")
    return arg_parser.parse_args()
cc099c5cf5d60da207c19107e9c1ef90385ab85b
703,267
import random

def crossover(p_1, p_2, r_cross):
    """Order-1 crossover / OX / order crossover.

    :param p_1: parent 1
    :param p_2: parent 2
    :param r_cross: rate of crossover
    :return: two children, or the parents unchanged when no crossover
        occurs
    """
    if random.random() >= r_cross:
        return [p_1, p_2]
    child_1, child_2 = p_1.copy(), p_2.copy()
    cut_a = random.randint(0, len(p_1) - 1)
    cut_b = random.randint(0, len(p_2) - 1)
    while cut_a == cut_b:
        # re-draw until the two cut points differ
        cut_b = random.randint(0, len(p_2) - 1)
    if cut_a > cut_b:
        # order the cut points
        cut_a, cut_b = cut_b, cut_a
    # Remove the elements that the middle slice of the other parent will
    # contribute, then splice that slice in.
    for offset in range(cut_b - cut_a):
        child_1.remove(p_2[cut_a + offset])
        child_2.remove(p_1[cut_a + offset])
    return [child_1[:cut_a] + p_2[cut_a:cut_b] + child_1[cut_a:],
            child_2[:cut_a] + p_1[cut_a:cut_b] + child_2[cut_a:]]
d0bdc28803feed1a67864204b8b3177f70f8cda7
703,268
def add_month(year, month, delta):
    """Add *delta* months to a ``(year, month)`` pair and return a new
    valid ``(year, month)`` tuple (months run 1-12)."""
    total_months = year * 12 + month + delta
    year, month = divmod(total_months, 12)
    if month == 0:
        # divmod yields month 0 for December; normalize to (year-1, 12).
        return year - 1, 12
    return year, month
8f509bba44bb27579b948c3b26e5f7c027be445c
703,269
import json

def __get_job_obj(profile):
    """Return the first 'job' object from the *profile* JSON file."""
    with open(profile, 'rt') as json_fobj:
        parsed = json.load(json_fobj)
    return parsed['jobs'][0]
2af6658f8a54987229dffe35efe37d2dace9f0bb
703,270
import torch

def model_fn(batch, model, criterion, device):
    """Forward a batch through the model.

    Returns (loss, accuracy), where accuracy is the fraction of samples
    whose argmax prediction matches the label.
    """
    mels, labels = batch
    mels, labels = mels.to(device), labels.to(device)
    outs = model(mels)
    loss = criterion(outs, labels)
    # Predicted speaker id = index of the highest logit per sample.
    preds = outs.argmax(1)
    accuracy = (preds == labels).float().mean()
    return loss, accuracy
2b9907e8f0fbec50b955082efb30d8cddc88b663
703,271
import os
import sys

def get_base_dir():
    """Attempt to locate ariadne's install directory.

    Prefers the ARIADNE_BASE environment variable (normalized to end
    with '/'); when it is unset or empty, falls back to the parent of
    the running script's directory.
    """
    # The original used a bare `except:` which also swallowed unrelated
    # errors; an explicit get + emptiness check covers the same cases.
    base = os.environ.get('ARIADNE_BASE')
    if not base:
        # This may be significantly better than using the environment
        # variable.
        return os.path.realpath(sys.path[0] + "/..")
    if not base.endswith('/'):
        base += '/'
    return base
b4ab46100e373f17700a06c4ccee1f82c5b97eb7
703,272
import argparse

def get_arguments():
    """Parse input arguments for evaluation."""
    parser = argparse.ArgumentParser(description="Code for evaluation")
    parser.add_argument('--best_iter', type=int, default=70000,
                        help='iteration with best mIoU')
    parser.add_argument('--normalize', type=bool, default=False,
                        help='add normalizor to the entropy ranking')
    parser.add_argument('--lambda1', type=float, default=0.67,
                        help='hyperparameter lambda to split the target domain')
    parser.add_argument('--cfg', type=str,
                        default='../ADVENT/advent/scripts/configs/advent.yml',
                        help='optional config file')
    # FDA / self-supervised-learning options ------------------------------
    parser.add_argument("--FDA-mode", type=str, default="off",
                        help="on: apply the amplitude switch between source and target, off: doesn't apply ampltude switch")
    parser.add_argument('--round', type=int, default=0,
                        help='specify the round of self supervised learning')
    parser.add_argument("--MBT", type=bool, default=True)
    parser.add_argument("--LB", type=str, default="0",
                        help="beta for FDA or MBT")
    parser.add_argument("--restore-opt1", type=str, default=None,
                        help="restore model parameters from beta1")
    parser.add_argument("--restore-opt2", type=str, default=None,
                        help="restore model parameters from beta2")
    parser.add_argument("--restore-opt3", type=str, default=None,
                        help="restore model parameters from beta3")
    # ---------------------------------------------------------------------
    return parser.parse_args()
6f3a351c9630c5b524cf5e4bb8db6ee14f995cba
703,273
def get_context(canvas):
    """Get the ``cairo.Context`` used to draw onto *canvas*."""
    renderer = canvas.renderer
    return renderer._get_context()
1d68e6eb742dff906b6e64c85d9609e34f508b77
703,274
import typing
import os

def merge_data_from_gen_files_with_format(root_path: str) -> typing.List[str]:
    """Read data from the multiple generated formatted files and return
    it all as one list.

    Scans *root_path* for ``gpt2_gentext_*.txt`` files; each file is split
    into blocks on the '====' separator line, and each block into
    functions on the 'NWU-NISL-TY-2019' marker line.
    """
    merged: typing.List[str] = []
    for entry in os.listdir(root_path):
        if not (entry.startswith('gpt2_gentext_') and entry.endswith('.txt')):
            continue
        full_name = os.path.join(root_path, entry)
        with open(full_name, 'r', encoding='utf-8') as handle:
            contents = handle.read()
        blocks = contents.split('====================\n')
        # Drop the empty trailing chunk a terminal separator leaves behind.
        if blocks and blocks[-1] == '':
            blocks = blocks[:-1]
        for block in blocks:
            merged += block.split('NWU-NISL-TY-2019\n')
    return merged
4b744effc8df57066e9c42c5d39aedef2fdf0510
703,275
def fahrenheit_from(celsius):
    """Convert Celsius to Fahrenheit degrees.

    Returns the result as a string rounded to three decimal places, or
    "invalid input" when *celsius* cannot be parsed as a number.
    """
    try:
        degrees = float(celsius)
    except ValueError:
        return "invalid input"
    fahrenheit = round(degrees * 9 / 5 + 32, 3)
    return str(fahrenheit)
e31ac8c62f108652fe3cc2ee1516a5b3a1a9e568
703,276
def big_endian_to_int(value):
    """Ethereum RLP utils: convert a big-endian byte value to an int.

    :param value: big-endian bytes value
    :return: int value
    """
    result = int.from_bytes(value, byteorder="big")
    return result
57c9b05471e3558cae1a0d36dd3089b4d180faeb
703,277
def _read_files(file_names):
    """Read content from all specified file names.

    Args:
        file_names: set of file names

    Returns:
        list of lines from all files; unreadable files are skipped with
        a message (best-effort behavior preserved).
    """
    all_lines = []
    for file_name in file_names:
        try:
            with open(file_name) as f:
                all_lines.extend(f.read().splitlines())
        except OSError:
            # Narrowed from `except Exception`: only I/O failures are
            # expected here; anything else should surface to the caller.
            print("Skipping: {0}".format(file_name))
    return all_lines
98fabeeaeaf6dd142acaf7cf84c0ac25583bcdbf
703,278
def create_track_log(db, sessionID):
    """Instantiate the Track History Collection.

    :param db: The database object.
    :param sessionID: Current user's session ID.
    :return: The Track History Collection object.
    """
    # Collection name is namespaced per session.
    return db['track_history_' + sessionID]
5fb72ae83e5a805ad8e35f62c9474e51170d3fb2
703,279
def InvertDepthNorm(
    depth, maxDepth=1000.0, minDepth=10, transform_type='inverse'
):
    """Renormalize predictions back to targets space.

    :param transform_type: one of 'inverse', 'scaled', 'log'
    :raises ValueError: for an unknown transform_type (previously this
        fell through and silently returned None).
    """
    if transform_type == 'inverse':
        return maxDepth / depth
    if transform_type == 'scaled':
        return depth * minDepth
    if transform_type == 'log':
        return (10 ** depth) * minDepth
    raise ValueError("Unknown transform_type: {!r}".format(transform_type))
634ae5d7e3e92b84328c42683fe321d8c8ab7ced
703,280
import torch

def gen_noise_Gaussian(num_instance, n_dim=2):
    """Generate ``num_instance`` samples of *n_dim*-dimensional standard
    Gaussian noise as a (num_instance, n_dim) tensor."""
    noise = torch.randn(num_instance, n_dim)
    return noise
77237cf7a81408fae9099d4647e30c53e9866ab3
703,281
def detect_api_mismatch(ref_api, other_api, *, ignore=()):
    """Returns the set of items in ref_api not in other_api, except for a
    defined list of items to be ignored in this check.

    By default this skips private attributes beginning with '_' but
    includes all magic methods, i.e. those starting and ending in '__'.
    """
    missing = set(dir(ref_api)) - set(dir(other_api))
    if ignore:
        missing -= set(ignore)
    return {name for name in missing
            if not name.startswith('_') or name.endswith('__')}
4353f3f6b825570e3193b57dbb08c3a26c7f59b9
703,282
def unbatch_padded(x, lens_x):
    """Split a padded batch into a list of per-element numpy arrays with
    the padded (masked) tail of each element omitted."""
    per_item = x.chunk(x.shape[0], dim=0)
    cleaned = []
    for item, length in zip(per_item, lens_x):
        # Each element is viewed as (n, 2) coordinate pairs; keep only
        # the first `length` valid pairs.
        pairs = item.reshape(-1, 2)[:length]
        cleaned.append(pairs.detach().cpu().numpy())
    return cleaned
8042076f8637ced12ba31e077198de6ed6841145
703,283
from typing import Iterable

def deepmap(func, obj):
    """Deep traverse obj, and apply func to each of its non-Iterable
    elements.

    Strings and bytes are treated as leaves: iterating a one-character
    string yields that same string, so the original recursed forever on
    any str input.
    """
    if isinstance(obj, Iterable) and not isinstance(obj, (str, bytes)):
        return [deepmap(func, x) for x in obj]
    return func(obj)
418d3342c86c422f5d4231030d66c03a08e89a9d
703,284
def factorial(num):
    """Find the factorial of the input integer.

    :arg num: an integer
    """
    # 0! is 1 by definition; for positive num multiply 2..num onto the
    # running product (starting it at 1 covers both cases).
    fact = 1
    for factor in range(2, num + 1):
        fact *= factor
    return fact
0dc8935c5d25acbc9d1d9dff1e86d5b6fcf80638
703,286
def MaybeEmulateMultiBleu(nltk_target_fn):
    """Includes emulate_multibleu argument into nltk_target_fn if necessary.

    The signature of the NLTK functions corpus_bleu and sentence_bleu
    depend on the NLTK version; this wrapper exists to absorb those
    differences between the public and internal environments. In this
    environment no adaptation is required, so the function is returned
    unchanged.

    Args:
        nltk_target_fn: a function that computes BLEU given arguments
            gold and predicted.

    Returns:
        a function that takes arguments gold and predicted, in the format
        expected by NLTK's corpus_bleu and sentence_bleu functions.
    """
    return nltk_target_fn
476770fb9e025360ab9dbeaa71b8a0cc7bdaa96d
703,287
import itertools

def combine_targets_and_zeros(target_strings, php_zero_strings, zero_strings,
                              n_collisions):
    """Combine the zero strings with the target strings until the desired
    number of collisions is reached.

    The number of middle zero-string segments grows one at a time, so the
    shortest combinations are produced first.
    """
    collisions = []
    for middle_count in itertools.count(0):
        for php_zero in php_zero_strings:
            for middle in itertools.product(zero_strings, repeat=middle_count):
                for target in target_strings:
                    collisions.append(php_zero + ''.join(middle) + target)
                    if len(collisions) == n_collisions:
                        return collisions
4ba76f2b903ce3b7f6797aecd1027331631c282e
703,288
import numpy

def get_storm_track_colours():
    """Returns list of colours to use in plotting storm tracks.

    :return: rgb_matrix: 10-by-3 numpy array; row i holds the red, green
        and blue components of the [i]th colour, scaled into [0, 1].
    """
    rgb_255 = [
        [187, 255, 153],
        [129, 243, 144],
        [108, 232, 181],
        [88, 213, 221],
        [69, 137, 209],
        [52, 55, 198],
        [103, 37, 187],
        [161, 23, 175],
        [164, 10, 107],
        [153, 0, 25],
    ]
    return numpy.array(rgb_255, dtype=float) / 255
69acc4f2a666a86045f10aefe3ffa96bea8e99d0
703,289
def replace_number_chunks(msg, tar='?'):
    """
    Replace digits and adjacent alphabets with the given term

    @param msg: Input message
    @type msg: String
    @param tar: New term to replace
    @type tar: String
    @return: Replaced message
    @rtype: String

    A "chunk" is a maximal run of alphanumeric characters containing at
    least one digit; it is bounded by non-alphanumeric characters (or the
    string ends). Each chunk is replaced by ``tar``.

    Fix: the old find_prev_escape returned 0 both for "escape at index 0"
    and "no escape at all", so a chunk touching the start of the string
    kept its first character and the loop never converged (e.g.
    "abc123 def" produced "a" followed by 50 copies of tar).
    """
    def find_first_digit(text):
        # Index of the first digit, or -1 when there is none.
        for i, c in enumerate(text):
            if c.isdigit():
                return i
        return -1

    def find_prev_escape(text, idx):
        # Index of the closest non-alphanumeric char before idx,
        # or -1 when the chunk extends to the start of the string.
        for i in range(idx - 1, -1, -1):
            if not text[i].isalnum():
                return i
        return -1

    def find_next_escape(text, idx):
        # Index of the closest non-alphanumeric char after idx,
        # or -1 when the chunk extends to the end of the string.
        for i in range(idx + 1, len(text)):
            if not text[i].isalnum():
                return i
        return -1

    # Cap iterations to guard against pathological inputs; each pass
    # replaces exactly one chunk.
    for _ in range(50):
        digit_idx = find_first_digit(msg)
        if digit_idx == -1:
            return msg
        prev_idx = find_prev_escape(msg, digit_idx)
        next_idx = find_next_escape(msg, digit_idx)
        # msg[:0] (empty) when prev_idx == -1, i.e. the chunk starts the
        # string; otherwise the escape character itself is kept.
        prefix = msg[:prev_idx + 1]
        if next_idx == -1:
            msg = prefix + tar
        else:
            msg = prefix + tar + msg[next_idx:]
    return msg
16466501a41b6f97150264e89cc999b61291be9e
703,290
from typing import Iterable
import torch


def fuse_single_qubit_operators(
    qubits: Iterable[int],
    operators: Iterable[torch.Tensor],
):
    """Multiply together gates acting on various single qubits.

    When the same qubit appears more than once in ``qubits``, the
    corresponding operators are matrix-multiplied in application order:
    a later gate is applied on the LEFT of the product already stored for
    that qubit. For qubits [2, 4, 2, 7] with operators [A, B, C, D] the
    result is {2: C @ A, 4: B, 7: D}.

    Args:
        qubits: Iterable of integers giving the qubits that gates act on.
        operators: Iterable of Tensor objects specifying single qubit
            operators. The size should be (*batch_dims, 2, 2).

    Returns:
        Dict mapping qubits to act on to a fused gate.
    """
    fused = dict()
    for qubit, gate in zip(qubits, operators):
        prior = fused.get(qubit)
        # First gate for this qubit is stored as-is; later ones are
        # composed on the left (applied after the stored product).
        fused[qubit] = gate if prior is None else torch.matmul(gate, prior)
    return fused
640541a3b0a79deb819bafad5734aca3e0dde23d
703,291
def parse_visitor_score(d):
    """Used to parse score of visiting team.

    Reads the "uitslag" (result) string, e.g. " 2- 3", and returns the
    away-side score (the number after the dash) as an int. Missing key
    defaults to a 0-0 result.
    """
    raw_score = d.get("uitslag", " 0- 0")
    compact = raw_score.replace(" ", "")
    _home, visitor = compact.split("-")
    return int(visitor)
df286924774823ca250b71fcb060278093a7611b
703,292
import os


def get_written_file_path(line):
    """
    Handles the acquisition of the path string for a written file. It is
    used to handle linux problems with windows style path strings.

    :param line: current line of the pandalog, expected to contain a
        "filename,<path>)" fragment
    :return: normalized path to the written file
    """
    marker = u'filename,'
    start = line.find(marker)
    # NOTE(review): when the marker is absent, find() returns -1 and the
    # slice keeps only the last character — presumably callers guarantee
    # the marker is present.
    tail = line[start:]
    raw_path = tail.strip().split(',')[1].split(')')[0]
    return os.path.normpath(raw_path)
e6ff40bcfff2b7ae09a405e20c947bf0324c94a7
703,293
def tag_index(idx):
    """Return a mapping of tag names to index items.

    Each item in ``idx`` must expose a ``tags`` iterable and be hashable;
    the result maps every tag to the set of items carrying it.
    """
    mapping = dict()
    for item in idx:
        for tag in item.tags:
            mapping.setdefault(tag, set()).add(item)
    return mapping
df3ee2a934bfe3c814a9c1ded8d83314064f38bd
703,294
def node_text(node):
    """Needed for things like abstracts which have internal tags
    (see PMID:27822475).

    Concatenates the element's own leading text with the tail text of
    each direct child, skipping children without a tail.
    """
    parts = [node.text] if node.text else []
    parts.extend(child.tail for child in node if child.tail is not None)
    return "".join(parts)
076967e644cc99b7339f0cce9f8396a713e61999
703,295
def construct_bbox(all_points):
    """
    Construct the bounding box based on all points from the road and
    buildings that were discretised.

    Returns the box as a closed ring of five (x, y) corner tuples,
    starting and ending at (min_x, min_y), traversed counter to x first.
    """
    coords = list(zip(*all_points))
    x_lo, x_hi = min(coords[0]), max(coords[0])
    y_lo, y_hi = min(coords[1]), max(coords[1])
    return [(x_lo, y_lo),
            (x_lo, y_hi),
            (x_hi, y_hi),
            (x_hi, y_lo),
            (x_lo, y_lo)]
77f01466bbef500c7c79eee39ad3a69926ec8fc3
703,296
import os
import zipfile


def unzip_files(file_list, force=False):
    """Given a list of file paths, unzip them in place.

    Attempts to skip it if the extracted folder exists.

    Parameters
    ----------
    file_list : list of str
        Files to extract.
    force : bool, default=False
        Force the unzip even if the output folder exists.

    Returns
    -------
    List of created outputs (folders that were actually extracted;
    skipped archives are omitted).
    """
    result_list = []
    for zip_path in file_list:
        working_dir = os.path.dirname(zip_path)
        zip_name = os.path.splitext(os.path.basename(zip_path))[0]
        new_folder_path = os.path.join(working_dir, zip_name)
        if force or not os.path.exists(new_folder_path):
            with zipfile.ZipFile(zip_path, 'r') as myzip:
                # Create a directory of the same name as the zip.
                # exist_ok so that force=True no longer crashes with
                # FileExistsError when the folder is already present.
                os.makedirs(new_folder_path, exist_ok=True)
                myzip.extractall(path=new_folder_path)
            result_list.append(new_folder_path)
    return result_list
185c52329a3b85cf5a25b9f91e12647917fecee3
703,297
import math


def dist(p1, p2):
    """
    Determines the straight line (Euclidean) distance between two points
    p1 and p2 in euclidean space.

    Uses math.hypot, which is numerically safer than
    sqrt(dx**2 + dy**2) for very large or very small coordinates.
    """
    return math.hypot(p1[0] - p2[0], p1[1] - p2[1])
8a72ba5966452e7ac2e44f4c1f61d78071423ace
703,298
def resize(x, p=2):
    """Resize heatmaps by raising each value to the power ``p``.

    :param x: input value or array (anything supporting ``**``)
    :param p: exponent, default 2
    :return: ``x`` raised to the ``p``-th power
    """
    result = x ** p
    return result
b39b25e3c35b1bfa4e76deb638b77ba3fca8c781
703,299
def pointIsInside(x, y):
    """pointIsInside

    Arguments:
    x,y -- x and y coordinates of the point.

    Returns True if it is inside of (or on) the unit circumference.
    """
    squared_radius = x * x + y * y
    return squared_radius <= 1.0
16e32c8705e08e868355f3bda5a9b5ae6d6a6f8c
703,300
import optparse


def parse_commandline():
    """
    returns (files, test_mode) created from the command line arguments
    passed to pytddmon.
    """
    opt_parser = optparse.OptionParser(
        usage="usage: %prog [options] [static file list]",
        version="%prog 1.0.8")
    opt_parser.add_option(
        "--log-and-exit",
        action="store_true",
        default=False,
        help='Run all tests, write the results to "pytddmon.log" and exit.')
    opt_parser.add_option(
        "--log-path",
        help='Instead of writing to "pytddmon.log" in --log-and-exit, '
             'write to LOG_PATH.')
    opt_parser.add_option(
        "--gen-kata",
        help='Generate a stub unit test file appropriate for jump '
             'starting a kata')
    opt_parser.add_option(
        "--no-pulse",
        dest="pulse_disabled",
        action="store_true",
        default=False,
        help='Disable the "heartbeating colorshift" of pytddmon.')
    options, file_args = opt_parser.parse_args()
    return (
        file_args,
        options.log_and_exit,
        options.log_path,
        options.pulse_disabled,
        options.gen_kata)
5b98e682e05514585dcb488f4386b1a1611f6b5a
703,301
from typing import List
import glob


def get_l10n_files() -> List[str]:
    """Collect every translation-related file: the .pot template, the
    per-locale .po sources, and their compiled .mo counterparts.

    Returns:
        List[str]: paths of the .pot, .po and .mo files, in that order.
    """
    pot_file = 'asaloader/locale/asaloader.pot'
    po_files = glob.glob('asaloader/locale/*/LC_MESSAGES/asaloader.po')
    # Every .po has a sibling .mo with the same stem.
    mo_files = [po_path.replace('.po', '.mo') for po_path in po_files]
    return [pot_file] + po_files + mo_files
a783679261e7c9bd617728946d01a440b51cfb6a
703,302
def filter_value(entry, values=None):
    """
    Returns True if it should be filtered.

    Only take calls with filter values in the list provided; if None
    provided, assume that filter_value must be PASS or blank '.'
    """
    if values is not None:
        # Non-empty intersection (truthy) means the entry is filtered.
        return values.intersection(entry.filter)
    has_filters = bool(entry.filter)
    return has_filters and 'PASS' not in entry.filter
57ee5ab67fa07cb8c1379d303e9d636718025f45
703,303
def process_data(expected_keys, data):
    """
    Check for any expected but missing keyword arguments and raise a
    TypeError, else return the keyword arguments repackaged in a
    dictionary, i.e. the payload.

    :param expected_keys: iterable of keys that must be present in data
    :param data: dict of keyword arguments; matched keys are popped out
    :return payload: dict mapping each expected key to its value
    :raises TypeError: if any expected key is absent from data
    """
    # A sentinel distinguishes "key absent" from legitimately falsy
    # values (0, '', False, None), which the previous truthiness check
    # wrongly rejected as missing.
    missing = object()
    payload = {}
    for key in expected_keys:
        value = data.pop(key, missing)
        if value is missing:
            raise TypeError("Missing value on key {0}".format(key))
        payload[key] = value
    return payload
a7d9b87af72d5217cdd6c67ab27986780bea293a
703,304
def make_song_title(artists: list, name: str, delim: str) -> str:
    """
    Generates a song title by joining the song title and artist names.
    Artist names given in list format are joined using the given
    delimiter, then separated from the song name by " - ".
    """
    joined_artists = delim.join(artists)
    return "{} - {}".format(joined_artists, name)
341db19af517c09633a6ebe37726c79c020f4780
703,305
def authorizeView(user, identifier):
    """
    Returns True if a request to view identifier metadata is authorized.
    'user' is the requestor and should be an authenticated StoreUser
    object. 'identifier' is the identifier in question; it should be a
    StoreIdentifier object.
    """
    # In EZID, essentially all identifier metadata is public; only agent
    # PIDs are restricted, and superusers can see those too.
    is_public = not identifier.isAgentPid
    return is_public or user.isSuperuser
c831d74a229043a308226d6ae8078e5630507ded
703,306
def clean_keyword(kw):
    """Given a keyword parsed from the header of one of the tutorials,
    return a 'cleaned' keyword that can be used by the filtering
    machinery.

    - Title-cases the stripped keyword (capitalizes word starts)
    - Removes '.', '/' and spaces
    """
    cleaned = kw.strip().title()
    for unwanted in ('.', '/', ' '):
        cleaned = cleaned.replace(unwanted, '')
    return cleaned
eb8ab983bf60f5d1ca2996dc9568ded252d00479
703,307
from typing import Union
from pathlib import Path
import hashlib


def compute_md5(path: Union[str, Path], chunk_size: int):
    """Return the MD5 checksum of a file, calculated chunk by chunk.

    Parameters
    ----------
    path : str or Path
        Path to the file to be read.
    chunk_size : int
        Number of bytes read per chunk while hashing.
    """
    digest = hashlib.md5()
    with open(str(path), "rb") as handle:
        while True:
            block = handle.read(chunk_size)
            if not block:
                break
            digest.update(block)
    return digest.hexdigest()
9e718630323b002307a54e7d3bbf936b6b94637a
703,308
def denormalize(images, min_, max_):
    """Scales each image back to the [min_, max_] range.

    Inputs are assumed normalized to [-1, 1]: (i + 1) / 2 maps them to
    [0, 1] before stretching to the target span.
    """
    span = max_ - min_
    return [(img + 1) / 2 * span + min_ for img in images]
3071e3c76754bda8ea2ce9607003cfd1b4f97e48
703,309
def version_is_locked(version):
    """Determine if a version is locked.

    Returns the ``versionlock`` attribute when present, otherwise None.
    """
    try:
        return version.versionlock
    except AttributeError:
        return None
b2f76d89c2d0082ad13d5d896e5360d394c83ee1
703,310
import struct


def compute_checksum(message):
    """Calculates the 16-bit one's complement of the one's complement
    sum of a given message (the Internet checksum, RFC 1071).

    :param message: bytes to checksum; padded with a zero byte when the
        length is odd
    :return: checksum as an int in [0, 0xFFFF]
    """
    # If the message length isn't a multiple of 2 bytes, we pad with zeros.
    if len(message) % 2:
        message += struct.pack('x')
    # Split into big-endian 16-bit words.
    words = struct.unpack('!%dH' % (len(message) // 2), message)
    checksum = sum(words)
    # Fold all carry bits back into the low 16 bits (end-around carry).
    # The old check ('> 2**16') missed a sum of exactly 0x10000 and only
    # handled a single carry, which could yield a negative result.
    while checksum >> 16:
        checksum = (checksum & 0xFFFF) + (checksum >> 16)
    return 0xFFFF - checksum
30b9665d8fce75d0b55b43f025b9c7c755507522
703,311
def mockup_return(*args, **kwargs):
    """Mockup to replace regular functions for error injection.

    Accepts any call signature and always reports failure.
    """
    del args, kwargs  # intentionally ignored
    return False
92172e58a11e48a09c8f181ac55aa717b5fbb94d
703,312
def format_input(param):
    """Return a string with all the inputs property formatted.

    Each entry in ``param`` is a dict with 'type', 'name' (quoted — the
    surrounding quote characters are stripped) and 'ctor' keys, plus an
    optional 'sample' that switches the load call to its two-argument
    form.
    """
    with_sample = ' {type} {var}{ctor}; XC_LI_.load({var}, {sample});\n'
    without_sample = ' {type} {var}{ctor}; XC_LI_.load({var});\n'
    pieces = []
    for entry in param:
        template = with_sample if 'sample' in entry else without_sample
        pieces.append(template.format(
            type=entry['type'],
            var=entry['name'][1:-1],
            ctor=entry['ctor'],
            sample=entry.get('sample', '')))
    return ''.join(pieces)
41b4c26369e3be43b5466c9c375609571d5a04a4
703,313
import logging
import os
import json


def validate_args(args):
    """Validate CLI arguments and resolve them into a mapping of
    resource type to resource ids.

    - ``args.output`` must end in ".tf", otherwise ValueError.
    - ``args.input == "ALL"`` yields None (meaning: everything).
    - A ".json" input is loaded from disk (FileNotFoundError if absent).
    - Otherwise the input is a comma-separated id list; for type
      "monitor" the ids must be numeric and are converted to int.
    """
    if not args.output.endswith(".tf"):
        logging.exception(f'Output filename should end with .tf (i.e output.tf)')
        raise ValueError('Output filename should end with .tf (i.e output.tf)')

    if args.input == "ALL":
        return None

    if args.input.endswith(".json"):
        if not os.path.isfile(args.input):
            logging.exception(f'Input file "{args.input}" not found!!!')
            raise FileNotFoundError(f'Input file "{args.input}" not found!!!')
        with open(args.input, "r") as fp:
            return json.load(fp)

    id_tokens = args.input.split(',')
    if args.type in ["monitor"]:
        # Monitor ids are numeric; validate before converting.
        if not all(token.isnumeric() for token in id_tokens):
            logging.exception(f'Invalid Ids number input.')
            raise ValueError(f'Invalid Ids number input.')
        return {args.type: [int(token) for token in id_tokens]}
    return {args.type: id_tokens}
9a1328b27e741999b8383f63abbe0caccd24e518
703,314
def add_codes(err_cls):
    """Add error codes to string messages via class attribute names.

    Returns a proxy object whose attribute access looks the message up
    on ``err_cls`` and prefixes it with the attribute name in brackets,
    e.g. ``proxy.E001 -> "[E001] <message>"``.
    """
    class ErrorsWithCodes(object):
        def __getattribute__(self, code):
            message = getattr(err_cls, code)
            return "[{code}] {msg}".format(code=code, msg=message)

    return ErrorsWithCodes()
24ec122c290628c218a01867824fda681c4e7e88
703,315