content
stringlengths
35
416k
sha1
stringlengths
40
40
id
int64
0
710k
def recovery_invalid_token() -> str:
    """Return a deliberately invalid auth token (for negative-path tests).

    The previous docstring claimed this returned a *valid* token, which
    contradicted both the function name and the returned value.

    :return: a token string that no backend will accept
    """
    return 'wrong'
38ff965ffa7b579965e479ca1d676a4b40978772
703,882
def get_parameter_name(argument):
    """Strip the leading '$' or '%' prefix from a parameter name.

    Raises AssertionError when the argument carries no recognised prefix,
    since the parameter kind (runtime vs tagged) cannot be determined.
    """
    prefix = argument[0]
    if prefix != '$' and prefix != '%':
        raise AssertionError(u'Unexpectedly received an unprefixed parameter name, unable to '
                             u'determine whether it is a runtime or tagged parameter: {}'
                             .format(argument))
    return argument[1:]
54b51cd5e3239fbfaaccaad123975df0e84374fc
703,883
import hashlib

def cmpHash(file1, file2):
    """Return True when the MD5 digests of the two files are identical."""
    def _digest(path):
        # Whole-file read, matching the original behaviour.
        with open(path, 'rb') as handle:
            return hashlib.md5(handle.read()).hexdigest()

    return _digest(file1) == _digest(file2)
891b71188de42fb9c30a6559cd22b39685b6fc13
703,884
def bfmt(num, size=8):
    """Return the binary-string form of *num*, zero-padded to *size* bits.

    Values that do not fit in *size* bits are masked to their low *size*
    bits so the result is always exactly *size* characters.  Input that
    has no binary representation (e.g. a float) is returned unchanged.

    Fixes vs the original: the overflow test used ``>`` so ``num ==
    2**size`` slipped through and produced a ``size + 1``-character
    string, and the overflow branch shifted right by *size*, discarding
    the value instead of truncating it.
    """
    if num >= 2 ** size:
        # Keep only the low `size` bits (num mod 2**size).
        return format(num & (2 ** size - 1), 'b').zfill(size)
    try:
        return format(num, 'b').zfill(size)
    except ValueError:
        # e.g. floats: 'b' is not a valid format code, echo the input back.
        return num
8aadc9671643b48c7c05032473b05fd872475bb0
703,887
def check_barcode_is_off(alignment, tags, log=None):
    """Report whether the barcode was recognised with soft clipping.

    When True, the read can additionally be counted in the optional log.

    :param alignment: the read
    :param tags: alignment tags as dict (must hold 'bm' when 'RG' is present)
    :param log: optional logger exposing ``misbar``
    :return: True when the barcode is off, False otherwise
    """
    if 'RG' not in tags:
        return False
    if tags['bm'] == '0':
        return False
    if log:
        log.misbar(alignment)
    return True
7adcbb8eae797750b3e543c52db41341d82f0937
703,888
def already_visited(string):
    """Tell whether a subroutine call/definition was already visited.

    A visited call is marked by its first argument-ish token starting
    with ``gr`` after the opening parenthesis.

    :param string: the call or definition of a subroutine/function
    :return: True when it has been visited already, False otherwise
    """
    after_paren = string.partition('(')[2]
    cleaned = after_paren.replace(' ', '').replace('(', '')
    return cleaned[:2] == 'gr'
7a9d84b6e04cdf7edb27bb7cf49cf1021130ab07
703,889
import pathlib def get_stem_name(file_name: pathlib.Path | str | None) -> str: """Get the stem name from a file name. Args: file_name (pathlib.Path | str | None): File name or file path. Returns: str: Stem name. """ if file_name is None: return "" if isinstance(file_name, str): file_name = pathlib.Path(file_name) return file_name.stem
01bab045f2c54aedf848922550ae241c9ddf8bce
703,890
def getSingleIndexedParamValue(request, param_name, values=()):
    """Return the element of *values* selected by an HTTP query parameter.

    Args:
        request: the Django HTTP request object
        param_name: name of the query parameter in the HTTP request
        values: ordered list/tuple indexed by the parameter's integer value

    Returns:
        values[int(param value)] when the parameter is present, is an
        integer, and is a valid [0..len(values)-1] index; None otherwise.
    """
    raw = request.GET.get(param_name)
    if isinstance(raw, (tuple, list)):
        # Multiple values supplied; only the first is honoured.
        raw = raw[0]
    try:
        # A missing parameter maps to -1, which fails the range check below.
        idx = -1 if raw is None else int(raw)
    except ValueError:
        # Bogus parameter value: treat as absent (no message).
        return None
    if 0 <= idx < len(values):
        return values[idx]
    return None
c8a1a552d1ad9435e21243bf05226b373257d163
703,891
def _call(calls): """Make final call""" final_call = '' if calls['is_hiv'] == 'No': final_call = 'NonHIV' return final_call if calls['deletion'] == 'Yes': final_call = 'Large Deletion' if calls['inversion'] == 'Yes': final_call += ' with Internal Inversion' elif calls['hypermut'] == 'Yes': final_call += ' with Hypermut' return final_call if calls['inversion'] == 'Yes': final_call = 'Internal Inversion' return final_call if calls['hypermut'] == 'Yes': final_call = 'Hypermut' return final_call if calls['psc'] == 'Yes': final_call = 'Premature Stop Codon' return final_call if calls['defect'] == 'Yes' and calls['primer'] == 'Yes': final_call = "5' defect" return final_call if calls['primer'] == 'No': final_call = 'Inferred Intact' return final_call return 'Intact'
c5e293255911cfdb16a73a026a13b7a394ae71cc
703,892
import math

def get_sierpinski_carpet_set(width, height):
    """Build the Sierpinski-carpet point set that fits inside width x height.

    :param width: available width in pixels
    :param height: available height in pixels
    :return: list of (x, y) points collected from the carpet recursion
    """
    def get_sierpinski_carpet_points(left, top, right, bottom):
        """Recursively collect points inside the square [left..right] x [top..bottom].

        Recursion stops on non-square regions or sides shorter than 3.
        """
        w, h = right - left + 1, bottom - top + 1
        if w != h or w < 3:
            return []
        sub_len = int(w / 3)  # side length of each of the 9 sub-squares
        screen_points = []
        # Walk the 3x3 grid of sub-squares.
        for row in range(3):
            for col in range(3):
                x1, y1 = left + sub_len * row, top + sub_len * col
                x2, y2 = x1 + sub_len - 1, y1 + sub_len - 1
                if row == 1 and col == 1:
                    # Centre block: every point in it is collected.
                    for i in range(x1, x2 + 1):
                        for j in range(y1, y2 + 1):
                            screen_points.append((i, j))
                else:
                    # The 8 surrounding blocks recurse.
                    screen_points.extend(get_sierpinski_carpet_points(x1, y1, x2, y2))
        return screen_points

    # Side of the big square: the largest power of 3 fitting in both dimensions.
    square_len = int(math.pow(3, int(math.log(min(width, height), 3))))
    sierpinski_carpet_points = get_sierpinski_carpet_points(0, 0, square_len - 1, square_len - 1)
    print('Got {} Sierpinski carpet points in area {} * {}.'.format(len(sierpinski_carpet_points), width, height))
    return sierpinski_carpet_points
b66bbe0dd25b47d81089b35d21a255b3b56c1f1e
703,894
def destroy_asteroids(angles):
    """Destroy asteroids, starting with the laser pointing up and rotating clockwise.

    Each pass over the sorted angles destroys the closest remaining
    asteroid at every angle; exhausted angles are dropped.

    Fix: the original removed exhausted angles from ``sorted_angles``
    while iterating that same list, which made the ``for`` loop skip the
    angle that followed a removal and produced a wrong destruction order.

    :param angles: dict mapping angle -> list of asteroid distances (mutated)
    :return: list of (angle, distance) tuples in destruction order
    """
    destroy_list = []
    sorted_angles = sorted(angles)
    while sorted_angles:
        # Iterate over a snapshot so removals cannot skip the next angle.
        for angle in list(sorted_angles):
            if not angles[angle]:
                sorted_angles.remove(angle)
            else:
                to_remove = sorted(angles[angle])[0]
                angles[angle].remove(to_remove)
                destroy_list.append((angle, to_remove))
    return destroy_list
166fbd4e87152748b1d6527315fb87a91b617b7a
703,895
import json

def add_filename(json_):
    """Load an annotation JSON and record the matching image file name.

    Args:
        json_: path to the annotation JSON file

    Returns:
        dict: the annotations, with a 'filename' key holding '<basename>.jpg'
    """
    with open(json_) as handle:
        annotations = json.load(handle)
    # Derive the image name from the JSON file's own base name.
    base = json_.split('/')[-1].split('.')[0]
    annotations['filename'] = base + '.jpg'
    return annotations
3710a1d6f177b36c6786797f9af5c58cd527f354
703,896
from collections import OrderedDict

def sort_ordered_games_list(ordered_games_lists):
    """Reverse the group order and sort each group's games alphabetically.

    Fix: the original imported ``OrderedDict`` from ``typing``; that alias
    cannot be instantiated (``TypeError: Type OrderedDict cannot be
    instantiated``), so every call crashed.  ``collections.OrderedDict``
    is the concrete class.

    :param ordered_games_lists: ordered mapping of group -> ordered mapping
        of games; each game value exposes a ``title`` attribute
    :return: new OrderedDict with groups reversed and games title-sorted
    """
    new_order = OrderedDict()
    for group, games in reversed(ordered_games_lists.items()):
        new_order[group] = OrderedDict(
            sorted(games.items(), key=lambda item: item[1].title))
    return new_order
bc1b40884ad450d8a2b447c2628bfe77cd2797fa
703,897
import time
import os
import subprocess

def get_map_mrr(qids, predictions, labels, device=0, keep_results=False):
    """
    Get the map and mrr using the trec_eval utility.

    qids, predictions, labels should have the same length. device is not a
    required parameter, it is only used to prevent potential naming conflicts
    when you are calling this concurrently from different threads of execution.

    :param qids: query ids of predictions and labels
    :param predictions: iterable of predictions made by the models
    :param labels: iterable of labels of the dataset
    :param device: device (GPU index or -1 for CPU) for identification purposes only
    :param keep_results: when True, keep the generated qrel/results files
        on disk (and print their paths) instead of deleting them
    :return: (mean_average_precision, mean_reciprocal_rank) as floats
    """
    # Timestamp + device in the filenames keeps concurrent callers from
    # clobbering each other's temp files.
    qrel_fname = 'trecqa_{}_{}.qrel'.format(time.time(), device)
    results_fname = 'trecqa_{}_{}.results'.format(time.time(), device)
    qrel_template = '{qid} 0 {docno} {rel}\n'
    results_template = '{qid} 0 {docno} 0 {sim} castor-model\n'
    with open(qrel_fname, 'w') as f1, open(results_fname, 'w') as f2:
        # The row position doubles as the document number.
        docnos = range(len(qids))
        for qid, docno, predicted, actual in zip(qids, docnos, predictions, labels):
            f1.write(qrel_template.format(qid=qid, docno=docno, rel=actual))
            f2.write(results_template.format(qid=qid, docno=docno, sim=predicted))
    # The trec_eval binary is expected to live next to this module.
    trec_eval_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'trec_eval-9.0.5/trec_eval')
    trec_out = subprocess.check_output([trec_eval_path, '-m', 'map', '-m', 'recip_rank', qrel_fname, results_fname])
    trec_out_lines = str(trec_out, 'utf-8').split('\n')
    # First output line is MAP, second is MRR; the value is the last column.
    mean_average_precision = float(trec_out_lines[0].split('\t')[-1])
    mean_reciprocal_rank = float(trec_out_lines[1].split('\t')[-1])
    if keep_results:
        print("Saving prediction file to {}".format(results_fname))
        print("Saving qrel file to {}".format(qrel_fname))
    else:
        os.remove(results_fname)
        os.remove(qrel_fname)
    return mean_average_precision, mean_reciprocal_rank
0ddc68fd12afebd2954e0c71e1bc58ffd0f6a1bf
703,898
from typing import Union
from typing import Dict
from typing import Optional

def get_config_float(
    current: Union[int, float], config: Dict[str, str], name: str
) -> Optional[float]:
    """Fetch a config value as a float, falling back to the current value.

    :param current: value to keep when the key is absent from *config*
    :param config: config mapping to read from
    :param name: key to look up
    :return: *current* when the key is missing, None when the stored value
        is the empty string, otherwise the stored value as a float
    """
    raw = config.get(name)
    if raw is None:
        return current
    if raw == "":
        return None
    return float(raw)
d2bb436c4b2b4aef35a8f46927bc9145ecfed04c
703,900
def getRatingDistributionOfAMovie(ratingRDD, movieID):
    """
    Get the rating distribution of a specific movie
    Args:
        ratingRDD: a RDD containing tuples of (UserID, MovieID, Rating)
        movieID: the ID of a specific movie
    Returns:
        [(rating score, number of this rating score)]
    """
    movie_ratings = ratingRDD.filter(lambda row: row[1] == movieID)
    score_pairs = movie_ratings.map(lambda row: (row[2], 1))
    return score_pairs.countByKey()
708c67e51d318b887deea1ec3ec4dc4a272e794e
703,901
def lower_allbutfirst_letter(mystring):
    """Capitalise the first letter and lowercase the rest of the string.

    Fixes vs the original: the docstring claimed the first letter was left
    untouched (it is uppercased), and an empty string raised IndexError.

    :param mystring: input string
    :return: the capitalised string; empty input is returned unchanged
    """
    if not mystring:
        return mystring
    return mystring[0].upper() + mystring[1:].lower()
860d1449865790e15ccc840ee85ea366b2de5a64
703,902
import random

def strtest(aString):
    """Return the characters of *aString* shuffled into a random order."""
    shuffled_chars = random.sample(aString, len(aString))
    return "".join(shuffled_chars)
28bb6ed6b9f3a10ea19fbebb8ef60091123b0fd5
703,903
def mongo_uses_error_check(store):
    """
    Does mongo use the error check as a separate message?

    Recurses into composite stores via their ``modulestores`` attribute;
    leaf stores answer False.
    """
    try:
        substores = store.modulestores
    except AttributeError:
        return False
    return any(mongo_uses_error_check(substore) for substore in substores)
52d4a5135531ff18b0e19ac7aa91a453a2e736f1
703,904
def bedLine(chrom, chromStart, chromEnd, name, score=None, strand=None,
            thickStart=None, thickEnd=None, itemRgb=None, blockCount=None,
            blockSizes=None, blockStarts=None):
    """
    Given the fields, create a bed line string

    The optional fields are all-or-nothing: supplying ``score`` asserts
    that every remaining field is present too.
    """
    line = '%s %d %d %s' % (chrom, chromStart, chromEnd, name)
    if score is None:
        return line
    optional = [strand, thickStart, thickEnd, itemRgb, blockCount,
                blockSizes, blockStarts]
    for field in optional:
        assert(field is not None)
    line += ' %d %s %d %d %s %d %s %s' % (score, strand, thickStart, thickEnd,
                                          itemRgb, blockCount, blockSizes,
                                          blockStarts)
    return line
dd294d5d31ea3a2f7beb8a11a7ec705eb10cf1a4
703,906
def fatorial(num):
    """
    Compute the factorial of *num*.

    :param num: non-negative integer
    :return: num! (1 for num == 0)
    """
    result = 1
    # range(2, 1) is empty, so 0! and 1! both yield 1 without a special case.
    for factor in range(2, num + 1):
        result *= factor
    return result
181ea2bde3acef3f6ff4311054fb209edfad6160
703,907
def GePacketOut(egress_port, mcast, padding):
    """
    Generate packet_out packet with bytearray format

    The 32-bit payload is egress_port (9 bits) | mcast (16 bits) |
    padding (7 bits), returned as 4 bytes, most significant first.
    """
    bits = "{0:09b}{1:016b}{2:07b}".format(egress_port, mcast, padding)
    return bytearray(int(bits[i:i + 8], 2) for i in range(0, 32, 8))
c56abb84ec067cf8abb8e69d82c1342c4f20e0e9
703,908
def cmp_public_numbers(pn1, pn2):
    """
    Compare 2 sets of public numbers. This is a way to compare 2 public
    RSA keys: if both the modulus and the exponent match, the keys match.

    :param pn1: The set of values belonging to the 1st key
    :param pn2: The set of values belonging to the 2nd key
    :return: True if the sets are the same, otherwise False.
    """
    return pn1.n == pn2.n and pn1.e == pn2.e
a91a7204412d07808dbd6d5040f6df8baa576417
703,909
def ranges(int_list):
    """Given a sorted list of integers, return strings describing its runs.

    Only runs of two or more consecutive integers are reported (as
    "start->end"); isolated values are dropped, matching the original
    behaviour.

    Fix: the original used ``begin == 0`` as its "first element" sentinel,
    so any input whose first run starts at 0 (e.g. [0, 1, 3]) reset the
    run on every element and lost the "0->1" range.  A None sentinel
    distinguishes "not started" from a genuine 0.
    """
    result = []
    begin = end = None
    for i in int_list:
        if begin is None:
            # First element starts the first run.
            begin = end = i
        elif i - 1 == end:
            # Still consecutive: extend the current run.
            end = i
        else:
            # Run broken: report it only if it spans more than one value.
            if begin != end:
                result.append("{0}->{1}".format(begin, end))
            begin = end = i
    # Flush the final run (again only multi-element runs are reported).
    if begin is not None and begin != end:
        result.append("{0}->{1}".format(begin, end))
    return result
cc6aab9442a6f6986acccb1fa46cd61ff1e4ba07
703,910
def format_data(x_data=None, y_data=None):
    """
    =============================================================================
    Pair up separate x and y coordinate sequences into the tuple-of-tuples
    layout that ReportLab plotting expects.

    Arguments:
        x_data - indexable sequence of x coordinates
        y_data - indexable sequence of y coordinates (same length as x_data)

    Returns:
        A tuple of (x, y) tuples.

    Notes:
        Wrap the result in a list for plotting in ReportLab; multiple items
        in that list represent multiple data sets.
    -----------------------------------------------------------------------------
    """
    assert(x_data is not None)
    assert(y_data is not None)
    assert(len(x_data) == len(y_data))
    # zip already yields the (x, y) tuples we need.
    return tuple(zip(x_data, y_data))
34ac418f38194644f9372f20d47535424c7bfb52
703,911
import json
import os
import sys

def write_plugins_index(file_name, plugins):
    """
    Write the list of (name, version, description) plugin tuples to the
    index file in JSON format.

    Returns True if the file was actually updated, or False if it was
    already up-to-date.
    """
    entries = [
        {"name": name, "version": version, "description": description}
        for (name, version, description) in plugins
    ]
    # separators is given to avoid trailing whitespaces; see docs
    contents = json.dumps(entries, indent=2, separators=(",", ": "), sort_keys=True)
    current_contents = ""
    if os.path.isfile(file_name):
        # "rU" is only needed on Python 2; on Python 3 universal newlines is
        # the default and passing it raises deprecation warnings.
        mode = "rU" if sys.version_info < (3,) else "r"
        with open(file_name, mode) as f:
            current_contents = f.read()
    if contents.strip() == current_contents.strip():
        return False
    with open(file_name, "w") as f:
        f.write(contents + "\n")
    return True
52d110bb0e90c661f95b144fa95d6143d7e719a7
703,912
def get_console_scripts(entry_points):
    """pygradle's 'entrypoints' are misnamed: they really mean 'consolescripts'

    Accepts a dict (returns its 'console_scripts' value), a list of
    ini-style lines (returns the lines under a [console_scripts] header),
    or a newline-joined string of such lines.
    """
    if not entry_points:
        return None
    if isinstance(entry_points, dict):
        return entry_points.get("console_scripts")
    if isinstance(entry_points, list):
        collected = []
        in_section = False
        for raw_line in entry_points:
            stripped = raw_line.strip()
            if stripped and stripped.startswith("["):
                # Section header: we only collect inside [console_scripts...].
                in_section = "console_scripts" in stripped
                continue
            if in_section:
                collected.append(stripped)
        return collected
    # Assume a newline-joined string and reuse the list handling.
    return get_console_scripts(entry_points.split("\n"))
4ca1f6bb50959570c1c6d28312aabb939fe9daf8
703,913
def flatten(array: list):
    """Concatenate the rows of a list of lists into one flat list."""
    flat = []
    for row in array:
        flat.extend(row)
    return flat
178f8ddb6e4b4887e8c1eb79f32fe51c0cf5fd89
703,914
def keyevent2tuple(event):
    """Convert QKeyEvent instance into a tuple

    The accessors are called in a fixed order: type, key, modifiers,
    text, isAutoRepeat, count.
    """
    accessors = (event.type, event.key, event.modifiers,
                 event.text, event.isAutoRepeat, event.count)
    return tuple(accessor() for accessor in accessors)
a456ce7790232ecf8ea4f6f68109a2023f4f257b
703,915
import math

def get_inv_unit(block_index, diff):
    """
    given a block index and a 0-indexed layer in that block,
    returns a unit index.
    """
    # First unit index of each bottleneck block.
    block_offsets = {1: 0, 2: 3, 3: 7, 4: 13}
    # Three layers per unit; diff counts layers within the block.
    layer_offset = math.floor(abs(diff - 1) / 3)
    return block_offsets[block_index] + layer_offset
ed6936a81dd8f32f76a27efcf89b8e76d384b008
703,916
def liste_erreur(estimation, sol):
    """Return the pointwise absolute errors between an estimate and the exact solution.

    Parameters
    ----------
    estimation : (x, y) pair produced by the ODE solver
    sol : exact solution as a pair of sequences (x, y)

    Raises
    ------
    ValueError
        When the estimate and the solution differ in length.
    """
    (x, y) = estimation
    sol = list(sol)
    exact = sol[1]
    if len(y) != len(exact):
        raise ValueError ("L'estimation et la solution ne sont pas de la même longueur.")
    return [abs(approx - true) for approx, true in zip(y, exact)]
61a800f1316a153d50e39ce80239ddd4b841f74e
703,917
import math

def find_roots_quadratic(a: float, b: float, c: float) -> set:
    """Return a set containing the real solutions of ax^2 + bx + c = 0.

    Preconditions (asserted): a != 0 and the discriminant b^2 - 4ac >= 0.

    >>> find_roots_quadratic(1, -15, 56) == {8.0, 7.0}
    True
    >>> find_roots_quadratic(1, 8, 15) == {-3.0, -5.0}
    True
    """
    assert a != 0
    discriminant = b * b - 4 * a * c
    assert discriminant >= 0
    root = math.sqrt(discriminant)
    denominator = 2 * a
    return {(-b - root) / denominator, (-b + root) / denominator}
664f3ec213200ac2ed3a1cc4f8001da4331938bc
703,918
def trick_for_mountaincar(state, done, reward, state_):
    """Shaped reward for MountainCar: the absolute velocity of the next state.

    The environment's own reward is -1 per step until the goal at 0.5;
    state[0] is position (-1.2 ~ 0.6) and state[1] is velocity
    (-0.07 ~ 0.07). Only the next state's velocity is used here.
    """
    velocity = state_[1]
    return abs(velocity)
7ef703f6df9c1d10a250c18cb85def2702a3378d
703,919
def edge_failure_sampling(failure_scenarios, edge_column):
    """Criteria for selecting failure samples

    Parameters
    ---------
    failure_scenarios - Pandas DataFrame of failure scenarios
    edge_column - String name of column to select failed edge ID's

    Returns
    -------
    edge_failure_samples - List of unique failed edge IDs
    """
    failed_edges = failure_scenarios[edge_column].values.tolist()
    # De-duplicate; note set() does not preserve the original order.
    return list(set(failed_edges))
91c251241dcde7d457b69b2033a1751b3ae963fd
703,920
import time
import random

def my_solver(filename: str) -> str:
    """Dummy solver: sleeps a uniform random 0-2 seconds (1 s on average)
    and echoes its input.

    The previous docstring claimed a 2.5 s average wait; ``random.random()
    * 2`` actually averages 1 second.

    :param filename: identifier of the problem instance
    :return: the same filename as the input
    """
    print("Running my solver")
    time.sleep(random.random() * 2)
    return filename
8aac2ebe64e8c3d1596441942e4c9a348c977f8f
703,921
import re

def count_arg_nums(method_signature):
    """
    Count the arguments in a JNI-format method signature.

    :param method_signature: method signature (JNI format)
    :return: number of arguments
    """
    # Everything between the parentheses is the argument portion.
    arg_signature = re.findall(re.compile(r'\((.*?)\)'), method_signature)[0]
    # One match per argument: object types (L...;), primitives, or
    # primitive arrays.
    arg_pattern = re.compile(r'(L.*?;)|([BCDFISJZ])|(\[[BCDFISJZ])')
    return len(arg_pattern.findall(arg_signature))
6703653e26ced05baf1a639d93d6435ea8b6ff8e
703,922
def form_columns(form):
    """
    :param form: Taken from requests.form
    :return:
        columns: list of slugified column names
        labels: dict mapping string labels of special column types
            (observed_date, latitude, longitude, location) to column names
    """
    labels = {}
    columns = []
    prefix = 'col_name_'
    for field, label in form.items():
        # Only fields shaped like col_name_<slug> describe columns.
        if not field.startswith(prefix):
            continue
        column = field.replace(prefix, "")
        columns.append(column)
        # e.g. labels['observed_date'] = 'date'
        labels[label] = column
    return columns, labels
a3a2fdaa17310c04bb28675f88976cd7283f65a9
703,923
def bin2int(buf):
    """the reverse of int2bin, convert a binary buffer to an integer"""
    result = 0
    # Big-endian fold: each byte shifts the accumulator left by 8 bits.
    for byte in bytearray(buf):
        result = (result << 8) | byte
    return result
0c0edd88d7d4157f60641bc05f810184ef56f133
703,924
import base64
import six

def UrlSafeB64Decode(message):
    """wrapper of base64.urlsafe_b64decode.

    Helper method to avoid calling six multiple times for preparing b64
    strings.

    Args:
        message: string or binary to decode

    Returns:
        decoded data in string format.
    """
    raw = six.ensure_binary(message)
    return six.ensure_str(base64.urlsafe_b64decode(raw))
f675c56f0bbd35661adfbea85135a9434fd7b107
703,925
def imei_parse_nibble(nibble):
    """Parse one nibble of an IMEI and return its ASCII representation."""
    # Digits 0-9 map straight to their ASCII characters.
    if nibble < 10:
        return chr(ord('0') + nibble)
    # Special nibble values; anything else (0xf) yields the empty string.
    specials = {0xa: '*', 0xb: '#', 0xc: 'C', 0xd: '.', 0xe: '!'}
    return specials.get(nibble, '')
837445a7679bc5355978d7d4e69c5c9fa166cb3f
703,926
def check_command_succeeded(reply):
    """
    Return True if the command succeeded; otherwise print the rejection
    reason and return False.

    param reply: BinaryReply
    return: boolean
    """
    # 255 is the binary error response code; anything else means accepted.
    if reply.command_number != 255:
        return True
    print ("Danger! Command rejected. Error code: " + str(reply.data))
    return False
a320b5000f59790e314108398339b9a66dbf6520
703,927
def format_interval(seconds):
    """ Format a number of seconds to a human readable string."""
    units = [
        (('week', 'weeks'), 604800),
        (('day', 'days'), 86400),
        (('hour', 'hours'), 3600),
        (('minute', 'minutes'), 60),
        #(('second', 'seconds'), 1)
    ]
    parts = []
    for names, unit_seconds in units:
        count, seconds = divmod(seconds, unit_seconds)
        if count > 0:
            parts.append('%d %s' % (count, names[count > 1]))
    # Remaining seconds keep their fractional part.
    if seconds:
        parts.append("%.2f %s" % (seconds, ['second', 'seconds'][seconds != 1.0]))
    return ', '.join(parts)
8deae4627807f4c5e0cc1844499ebb39f658f2d0
703,928
def split_on_comma(tokens):
    """Split a list of tokens on commas, ie ``,`` DELIM tokens.

    Only "top-level" comma tokens are splitting points, not commas inside
    a function or other :class:`ContainerToken`.

    :param tokens:
        An iterable of :class:`~.token_data.Token` or
        :class:`~.token_data.ContainerToken`.
    :returns:
        A list of lists of tokens
    """
    # Start with one (possibly empty) part; each comma opens a new one.
    parts = [[]]
    for token in tokens:
        if token.type == 'DELIM' and token.value == ',':
            parts.append([])
        else:
            parts[-1].append(token)
    return parts
8b89dc6857a7b3e9bcc02f3a291e0ff0cd8d5f20
703,929
def find(db, user):
    """
    Look up the note-list document keyed by *user*.

    :param db: database handle exposing a ``notelist`` collection
    :param user: user id used as the document ``_id``
    :return: whatever ``find_one`` yields for that id
    """
    return db.notelist.find_one({"_id": user})
04c6ad64e9b8ff2f5cd5462cebb9d37127b6d176
703,930
import numpy

def preprocess_image(x, mode='caffe'):
    """ Preprocess an image for an ImageNet-trained network.

    Args
        x: numpy.array of shape (None, None, 3) or (3, None, None).
        mode: One of "caffe" or "tf".
            - caffe: zero-center each color channel with respect to the
              ImageNet dataset, without scaling.
            - tf: scale pixels between -1 and 1, sample-wise.

    Returns
        The preprocessed input.
    """
    # Always convert to float32 to keep compatibility with opencv.
    x = x.astype(numpy.float32)
    if mode == 'caffe':
        x -= [103.939, 116.779, 123.68]
    elif mode == 'tf':
        x /= 127.5
        x -= 1.
    return x
a5fe2fe24e9dcef65a7f704a17ab14d4ddb72f97
703,931
def calculate_metric(threshold, args): """ 给定阈值得到预测类别后,计算precision和recall,其中precision是avg_sample(hit_cnt/pred_cnt), recall是avg_sample(hit_cnt/label_cnt),在当前场景下label_cnt恒等于1 """ # 格式:label:pred_cls,pred_score;pred_cls,pred_score; # 78089261739417600:78089261739417600,0.995399; # 获取label_list,按threshold划分得到pred_list label_list = [] pred_list = [] with open(args.search_result_file, mode='r') as f: results = f.readlines() for result in results: # print("result:", result) # label label = result.strip().split(':')[0] # 对于每个query图片,label是一个值 label_list.append(label) # pred pred_result = result.strip().split(':')[1].strip(';').split(';') # print("pred_result:", pred_result) pred_cls_list = [] # 对于每个query图片,pred是一个list for item in pred_result: try: pred_cls, pred_score = item.split(',') if float(pred_score) >= threshold: pred_cls_list.append(pred_cls) else: break except: print("item:", item) pred_list.append(pred_cls_list) # 求presision和recall assert len(label_list) == len(pred_list), "len(label_list) != len(pred_list)" sum_precision = 0.0 sum_recall = 0.0 """ tp: base里有该query,且检索结果是有 fp: base里没有该query,但检索结果是有 tn: base里没有该query,且检索结果是没有 fn: base里有该query,但检索结果是没有 """ tp = 0 fp = 0 tn = 0 fn = 0 for i in range(len(label_list)): if label_list[i] in pred_list[i]: # base里有该query,且检索结果是有 upper = 1 tp += 1 elif (label_list[i] == '0') and (len(pred_list[i]) == 0): # base里没有该query,且检索结果是没有 upper = 1 tn += 1 elif (label_list[i] == '0') and (len(pred_list[i]) != 0): # base里没有该query,但检索结果是有 upper = 0 fp += 1 else: # base里有该query,但检索结果是没有 upper = 0 fn += 1 lower_precision = max(len(pred_list[i]), 1) lower_recall = 1 sum_precision += upper / lower_precision sum_recall += upper / lower_recall precision = sum_precision / len(label_list) recall = sum_recall / len(label_list) pos_precision = tp / max(tp + fp, 1) pos_recall = tp / max(tp + fn, 1) pos_samples = tp + fn neg_precision = tn / max(tn + fn, 1) neg_recall = tn / max(tn + fp, 1) neg_samples = tn + fp return precision, recall, 
pos_precision, pos_recall, pos_samples, neg_precision, neg_recall, neg_samples
97bb7a81e9731a3f0c48000f98669d03f1d48ca4
703,932
import os
import csv
import random

def load_data():
    """Load data from the Quora dataset.

    Reads data/quora/train.csv (relative to the working directory),
    shuffles it, and splits 80/20 into train and eval portions.

    Returns:
        ((train_texts, train_cats), (eval_texts, eval_cats)) where each
        cats entry is {'INSINCERE': bool}.
    """
    # Partition off part of the train data for evaluation
    with open(os.path.join('data', 'quora', 'train.csv'), 'r') as train_file:
        # [1:] drops the CSV header row.
        train_data = [row for row in csv.reader(train_file, delimiter=',', quotechar='"')][1:]
    random.shuffle(train_data)
    # train_data = train_data[:50000]
    # Column 1 is presumably the question text and column 2 the insincere
    # flag ('1'/'0') -- TODO confirm against the dataset schema.
    train_texts = [line[1] for line in train_data]
    train_cats = [{'INSINCERE': line[2] == '1'} for line in train_data]
    split = int(0.8*len(train_data))
    return (train_texts[:split], train_cats[:split]), (train_texts[split:], train_cats[split:])
98d1b5241785cdb2f229fa076f64128aa4aaee29
703,933
from datetime import datetime

def current_time() -> datetime:
    """Return the current local time as a timezone-aware datetime."""
    now = datetime.now()
    # astimezone() attaches the local timezone to the naive timestamp.
    return now.astimezone()
2b7237f4c5a0d88ab7643dfdd3b1f8c524683884
703,934
def make_label(label_text):
    """ returns a label object conforming to api specs given a name """
    label = {
        'name': label_text,
        'messageListVisibility': 'show',
        'labelListVisibility': 'labelShow',
    }
    return label
8c388d138136af4f01ec02db1565d66049b38cf1
703,935
import torch def _empty_memory(memory_dim): """Get a empty memory, assuming the memory is a row vector """ return torch.zeros(1, memory_dim)
b7454e52bbc3c20061d53716c2e805603eb041ff
703,937
def mult(A, B):
    """
    Multiply two values A and B; use as "mult(A, B)".
    """
    product = A * B
    return product
586c9077303dd8a36ae6007ff74756f77ec8fb3b
703,939
def get_link_href(result_object, link_relation):
    """
    Given a result_object (returned by a previous API call), return the
    link href for a link relation.

    'result_object' a JSON object returned by a previous API call.
        May not be None.
    'link_relation' the link relation for which href is required.

    Returns None if the link does not exist.
    """
    assert result_object is not None
    link = result_object['_links'].get(link_relation)
    if not link:
        return None
    return link.get('href')
400cd38d1b29ea71bf974d8aa16c1b3adf104428
703,940
import sys def _are_we_frozen(): """Returns whether we are frozen via py2exe. This will affect how we find out where we are located.""" return hasattr(sys, "frozen")
a9e55631ce9f8d60351e41d257c225f859e39a05
703,941
import re

def rx_filter(objs: list, attr: str, prompt: str) -> list:
    """
    Filter a list of dicts based on user-entered regex match to one of
    their values.

    Loops on stdin until at least one object matches; an exact value match
    is tried before the regex.

    :param objs: list of dicts to filter
    :param attr: key whose value is compared/matched against the input
    :param prompt: text shown when asking for a search term
    :return: the matching subset of *objs*
    """
    while True:
        search_term = input(prompt+" ")
        # Prefer exact match first -- otherwise can never select an item that's a substring of another!
        matches = [obj for obj in objs if obj[attr] == search_term]
        # matches = [obj for obj in objs if attr(obj) == search_term]
        if matches:
            return matches
        rx_flags = 0
        # If search doesn't have uppercase letters, make it case-insensitive.
        if search_term == search_term.lower():
            rx_flags |= re.IGNORECASE
        rx = re.compile(search_term, rx_flags)
        matches = [obj for obj in objs if rx.search(obj[attr])]
        # matches = [obj for obj in objs if rx.search(attr(obj))]
        if matches:
            return matches
        print("No matches, try again.")
f0c6dd5609020054da7895e577483c911d9aaea3
703,942
def _get_usb_hub_map(device_info_list): """Creates a map of usb hub addresses to device_infos by port. Args: device_info_list (list): list of known usb_connections dicts. Returns: dict: map of usb hub addresses to device_infos by port """ map_usb_hub_ports = {} for device_info in device_info_list: hub_address = device_info['usb_hub_address'] port = device_info['usb_hub_port'] if hub_address: if hub_address not in map_usb_hub_ports: map_usb_hub_ports[hub_address] = {} if not map_usb_hub_ports[hub_address].get( port) or device_info['ftdi_interface'] == 2: map_usb_hub_ports[hub_address][port] = device_info return map_usb_hub_ports
eaadc4713a41fdf38cea4fce35806d1d8772df27
703,943
import os
import subprocess

def get_current_commit(srcdir):
    """Return information about git commit checked out in the given directory.

    :param srcdir: source code directory
    :type srcdir: str
    :return: commit information composed of brief SHA1 and subject
    :rtype: str

    NOTE(review): changes the process working directory as a side effect
    and never restores it; consider subprocess's ``cwd=`` instead.
    """
    os.chdir(srcdir)
    # shell=True so the quoted --pretty format string survives intact.
    return (
        subprocess.check_output('git log --pretty=format:"%h %s" -n 1', shell=True)
        .decode()
        .rstrip("\r\n")
    )
3b3601303135bfdfe66cb069ef7d4ed1f413af8b
703,944
import re

def parse_pgsql_logs(data):
    """
    Parse the pgsql benchmark data from ripsaw and return the data in list
    format

    Args:
        data (str): log data from pgsql bench run

    Returns:
        list_data (list): data digestable by scripts with below format
            e.g.:
            [
            {1: {'num_clients': '2','num_threads': '7','latency_avg': '7',
                 'lat_stddev': '0', 'tps_incl': '234', 'tps_excl': '243'},
            {2: {'num_clients': '2','num_threads': '7','latency_avg': '7',
                 'lat_stddev': '0', 'tps_incl': '234', 'tps_excl': '243'},
            {3: {'num_clients': '2','num_threads': '7','latency_avg': '7',
                 'lat_stddev': '0', 'tps_incl': '234', 'tps_excl': '243'},
            ]
            where keys{1,2,3} are run-IDs
    """
    # Each "PGBench Results" marker opens one benchmark-run section.
    match = data.split("PGBench Results")
    list_data = []
    # NOTE(review): sections 0 and 1 are skipped -- presumably preamble
    # before the first real run; confirm against the ripsaw log layout.
    for i in range(2, len(match)):
        # Collapse the section onto one line for the regex scans below.
        log = "".join(match[i].split("\n"))
        pgsql_data = dict()
        pgsql_data[i - 1] = {}
        # Each field is extracted independently; absent fields are simply
        # omitted from the run's dict.
        clients = re.search(r"scaling_factor\':\s+(\d+),", log)
        if clients and clients.group(1):
            pgsql_data[i - 1]["scaling_factor"] = clients.group(1)
        clients = re.search(r"number_of_clients\':\s+(\d+),", log)
        if clients and clients.group(1):
            pgsql_data[i - 1]["num_clients"] = clients.group(1)
        threads = re.search(r"number_of_threads\':\s+(\d+)", log)
        if threads and threads.group(1):
            pgsql_data[i - 1]["num_threads"] = threads.group(1)
        clients = re.search(r"number_of_transactions_per_client\':\s+(\d+),", log)
        if clients and clients.group(1):
            pgsql_data[i - 1]["number_of_transactions_per_client"] = clients.group(1)
        clients = re.search(
            r"number_of_transactions_actually_processed\':\s+(\d+),", log
        )
        if clients and clients.group(1):
            pgsql_data[i - 1][
                "number_of_transactions_actually_processed"
            ] = clients.group(1)
        # Latency values: only the integer part is captured by \d+.
        lat_avg = re.search(r"latency_average_ms\':\s+(\d+)", log)
        if lat_avg and lat_avg.group(1):
            pgsql_data[i - 1]["latency_avg"] = lat_avg.group(1)
        lat_stddev = re.search(r"latency_stddev_ms\':\s+(\d+)", log)
        if lat_stddev and lat_stddev.group(1):
            pgsql_data[i - 1]["lat_stddev"] = lat_stddev.group(1)
        # TPS including / excluding connection-establishment time.
        tps_incl = re.search(r"tps_incl_con_est\':\s+(\w+)", log)
        if tps_incl and tps_incl.group(1):
            pgsql_data[i - 1]["tps_incl"] = tps_incl.group(1)
        tps_excl = re.search(r"tps_excl_con_est\':\s+(\w+)", log)
        if tps_excl and tps_excl.group(1):
            pgsql_data[i - 1]["tps_excl"] = tps_excl.group(1)
        list_data.append(pgsql_data)
    return list_data
5bd5cd43432b17be6bd52004b151b32a0f574980
703,945
def jd2gdate(myjd):
    """Julian date to Gregorian calendar date and time of day.

    The input and output are for the proleptic Gregorian calendar.

    Parameters
    ----------
    myjd: julian date (float).

    Returns
    -------
    y, m, d, f : int, int, int, float
        Four element tuple containing year, month, day and the fractional
        part of the day in the Gregorian calendar. The first three are
        integers, and the last part is a float.
    """
    whole = int(myjd)
    frac = myjd - whole

    # Shift so `whole` is the JD at noon of the current date and `frac` is
    # the fraction elapsed since midnight of that date.
    if -0.5 < frac < 0.5:
        frac += 0.5
    elif frac >= 0.5:
        whole += 1
        frac -= 0.5
    elif frac <= -0.5:
        whole -= 1
        frac += 1.5

    # Classic integer-arithmetic JD -> calendar conversion (truncating
    # float division kept exactly as in the original).
    ell = whole + 68569
    quad = int((4 * ell) / 146097.0)
    ell -= int(((146097 * quad) + 3) / 4.0)
    cent = int((4000 * (ell + 1)) / 1461001)
    ell -= int((1461 * cent) / 4.0) - 31
    idx = int((80 * ell) / 2447.0)
    day = ell - int((2447 * idx) / 80.0)
    ell = int(idx / 11.0)
    month = idx + 2 - (12 * ell)
    year = 100 * (quad - 49) + cent + ell

    return int(year), int(month), int(day), frac
f43a299fd8627804893eb5b6266d6a016c191d72
703,946
def get_policy_targets(context, presentation):
    """
    Returns our target node templates and groups if we have them.
    """
    node_templates = []
    groups = []

    target_names = presentation.targets
    if target_names:
        templates_by_name = context.presentation.get(
            'service_template', 'topology_template', 'node_templates') or {}
        groups_by_name = context.presentation.get(
            'service_template', 'topology_template', 'groups') or {}
        for name in target_names:
            # A target may name either a node template or a group; node
            # templates take precedence, unknown names are ignored.
            if name in templates_by_name:
                node_templates.append(templates_by_name[name])
            elif name in groups_by_name:
                groups.append(groups_by_name[name])

    return node_templates, groups
f483b9749c25b7d56c0e0a02a6787d936782e470
703,948
def merge_df(df1, df2):
    """Build a dataframe with the columns of interest from both inputs,
    drop rows with missing values, and simplify the '538 Grade' ratings.

    Keyword arguments:
    df1 -- dataframe containing the interview/poll data.
    df2 -- dataframe containing the data about the polling agencies.
    """
    poll_cols = ['pollster', 'sample_size', 'party', 'end_date', 'subject',
                 'very', 'somewhat', 'not_very', 'not_at_all']
    rater_cols = ['Pollster', '538 Grade', 'Predictive Plus-Minus']

    # Left-join agency info onto the polls, then discard incomplete rows.
    merged = df1[poll_cols].merge(
        df2[rater_cols], left_on='pollster', right_on='Pollster',
        how='left').dropna()

    # Simplify the grades (unmapped grades become NaN, as before).
    grade_map = {'A': 'A', 'B': 'B', 'B-': 'B',
                 'B/C': 'C', 'C-': 'C', 'D-': 'D'}
    merged['538 Grade'] = merged['538 Grade'].map(grade_map)
    return merged
955569ebbbbcf141eedd64ca3ee9b9b89f7922be
703,949
import torch


def isPD(B):
    """Check whether a matrix is positive definite.

    Args:
        B ([torch.Tensor]): [Input matrix.]

    Returns:
        [bool]: [Returns True if matrix is positive definite, otherwise False.]
    """
    try:
        # torch.cholesky is deprecated (and removed in recent releases);
        # torch.linalg.cholesky raises LinAlgError — a RuntimeError
        # subclass — when the factorization fails, so the except clause
        # still catches it.
        _ = torch.linalg.cholesky(B)
        return True
    except RuntimeError:
        return False
c51dc4f6f48ac7417f49ef41b81f3b04816b9279
703,950
def convert_TriMap_to_SelectedLEDs( best_led_config ):
    """ Returns a lookup dict of the selected LEDs. """
    # Flatten the triangle -> LED-list mapping into {led_num: True}.
    return {led_num: True
            for tri_num in best_led_config
            for led_num in best_led_config[tri_num]}
521a1be0d11cb8198944e437d20d4ac0349c8856
703,951
def _run_symbolic_method(op_name, symbolic_fn, args): """ This trampoline function gets invoked for every symbolic method call from C++. """ try: return symbolic_fn(*args) except TypeError as e: # Handle the specific case where we didn't successfully dispatch # to symbolic_fn. Otherwise, the backtrace will have the clues # you need. e.args = ("{} (occurred when translating {})".format(e.args[0], op_name), ) raise
c95f8d18e4b3a0ed7a06ccc6bdf178a820537d08
703,952
def get_table_id(table):
    """
    Returns id column of the cdm table

    :param table: cdm table name
    :return: id column name for the table
    """
    # The id column is the table name with an "_id" suffix.
    return ''.join((table, '_id'))
33fd8f445f15fb7e7c22535a31249abf6f0c819b
703,954
from typing import Sequence
from typing import Hashable


def all_items_present(sequence: Sequence[Hashable], values: Sequence[Hashable]) -> bool:
    """
    Check whether all provided `values` are present at any index in the
    provided `sequence`.

    Arguments:
        sequence: An iterable of Hashable values to check for values in.
        values: An iterable of Hashable values for whose presence to check
            `sequence` for.

    Returns:
        `True` if all `values` are present somewhere in `sequence`, else
        `False`.
    """
    for needle in values:
        if needle not in sequence:
            return False
    return True
f43a881159ccf147d3bc22cfeb261620fff67d7a
703,955
def reorder(rules):
    """ Set in ascending order a list of rules, based on their score. """
    def by_score(rule):
        return rule.score
    return sorted(rules, key=by_score)
cf4ff3b8d8aacd5e868ee468b37071fed2c1d67e
703,956
import re


def extract_floats(string):
    """Extract all real numbers from the string into a list (used to parse
    the CMI gateway's cgi output)."""
    # Matches optional sign, optional leading dot, digits, optional
    # thousands groups, optional decimals and optional exponent.
    number_pattern = re.compile(
        r'[-+]?[.]?[\d]+(?:,\d\d\d)*[\.]?\d*(?:[eE][-+]?\d+)?')
    return [float(token) for token in number_pattern.findall(string)]
0dc26261d45bd0974e925df5ed660a6e31adf30c
703,957
def binary(n, digits):
    """Returns a tuple of (digits) integers representing the integer (n)
    in binary. For example, binary(3,3) returns (0, 1, 1)"""
    # Take the lowest `digits` bits, most significant first.
    return tuple((n >> shift) & 1 for shift in range(digits - 1, -1, -1))
bc52a985b86954b1d23bb80a14c56b3e3dfb7c59
703,958
import sympy


def GetShapeFunctionDefinitionLine3D3N(x,xg):
    """ This computes the shape functions on 3D line

    Keyword arguments:
    x -- Definition of line
    xg -- Gauss point
    """
    # N holds the three nodal shape-function values evaluated at xg.
    N = sympy.zeros(3)
    # Closed-form expressions for N[1] and N[2] in terms of the nodal
    # coordinates x[i,j] and the Gauss point xg.  Presumably generated
    # symbolically — do not edit these by hand; verify against the
    # generator if they need changing.
    N[1] = -(((x[1,2]-x[2,2])*(x[2,0]+x[2,1]-xg[0]-xg[1])-(x[1,0]+x[1,1]-x[2,0]-x[2,1])*(x[2,2]-xg[2]))/(-(x[1,0]+x[1,1]-x[2,0]-x[2,1])*(x[0,2]-x[2,2])+(x[0,0]+x[0,1]-x[2,0]-x[2,1])*(x[1,2]-x[2,2])))
    N[2] = -((x[0,2]*x[2,0]+x[0,2]*x[2,1]-x[0,0]*x[2,2]-x[0,1]*x[2,2]-x[0,2]*xg[0]+x[2,2]*xg[0]-x[0,2]*xg[1]+x[2,2]*xg[1]+x[0,0]*xg[2]+x[0,1]*xg[2]-x[2,0]*xg[2]-x[2,1]*xg[2])/(x[0,2]*x[1,0]+x[0,2]*x[1,1]-x[0,0]*x[1,2]-x[0,1]*x[1,2]-x[0,2]*x[2,0]+x[1,2]*x[2,0]-x[0,2]*x[2,1]+x[1,2]*x[2,1]+x[0,0]*x[2,2]+x[0,1]*x[2,2]-x[1,0]*x[2,2]-x[1,1]*x[2,2]))
    # Partition of unity: the three shape functions sum to one.
    N[0] = 1 - N[1] -N[2]
    return N
aaa2f5b7afac4afc60d2b79ef3f22fba3553aabb
703,959
import os


def picasso() -> dict:
    """Handler for service discovery

    :returns: picasso service descriptor
    :rtype: dict
    """
    # VERSION must be set in the environment; a missing variable raises
    # KeyError, exactly as before.
    version = os.environ["VERSION"]
    return {"app": "demo-man", "svc": "picasso", "version": version}
d8e8fe0ca6287536143149edd47c2e42f932a515
703,960
def _sorted_photon_data_tables(h5file): """Return a sorted list of keys "photon_dataN", sorted by N. If there is only one "photon_data" (with no N) it returns the list ['photon_data']. """ prefix = 'photon_data' ph_datas = [n for n in h5file.root._f_iter_nodes() if n._v_name.startswith(prefix)] ph_datas.sort(key=lambda x: x._v_name[len(prefix):]) return ph_datas
a8df6edb5cfa9b328d7648e0c9ab9f883812ee5a
703,962
def prepare_wld(bbox, mwidth, mheight):
    """Create georeferencing world file"""
    x_res = (bbox.maxx - bbox.minx) / mwidth
    y_res = (bbox.maxy - bbox.miny) / mheight
    # World files reference the CENTER of the top-left pixel.
    center_x = bbox.minx + x_res * 0.5
    center_y = bbox.maxy - y_res * 0.5
    # Line order: x-scale, row rotation, column rotation, negative y-scale,
    # then the top-left pixel-center coordinates.
    values = (x_res, 0.0, 0.0, -y_res, center_x, center_y)
    return ''.join("{:.8f}\n".format(v) for v in values)
668c348d74780a79a39ebc53f3f119ea37855e8e
703,963
def graphql_refresh_token_mutation(client, variables):
    """
    Refreshes an auth token

    :param client: GraphQL client object exposing an ``execute(query,
        variables=...)`` method (presumably a graphene/graphql test
        client — confirm against callers)
    :param variables: contains a token key that is the token to update
    :return: result of ``client.execute`` for the refreshToken mutation;
        on success it carries the new token and its payload
    """
    return client.execute('''
    mutation refreshTokenMutation($token: String!) {
        refreshToken(token: $token) {
            token
            payload
        }
    }''', variables=variables)
c217217b289a188de8709dbe875853329d2c3fbc
703,964
def check_duplicate_stats(stats1, stats2, threshold=0.01):
    """
    Check two lists of paired statistics for duplicates.

    Returns a list of the pairs that agree within to <1%.

    INPUTS:
    STATS1 : List of first statistical metric, e.g. Standard Deviations
    STATS2 : List of second statistical metric, e.g. Centered Root Mean
             Square Difference

    OUTPUTS:
    DUPLICATES : List of tuples of paired statistics that are duplicates.
                 The list contains the index locations of the pairs of
                 statistics followed by their values as 2-tuples.

    Raises
    ------
    ValueError
        If the threshold is too small, either input list is empty, or the
        two lists differ in length.

    Author: Peter A. Rochford
        Symplectic, LLC
        www.thesymplectic.com
        [email protected]

    Created on Apr 23, 2017
    """
    # Bug fix: the original constructed these ValueError objects without
    # `raise`, so none of the validation ever fired.
    if threshold < 1e-7:
        raise ValueError("threshold value must be positive: " + str(threshold))

    # Check for non-empty lists
    if len(stats1) == 0:
        raise ValueError("Argument stats1 is empty list!")
    elif len(stats2) == 0:
        raise ValueError("Argument stats2 is empty list!")

    # Check for matching list lengths
    if len(stats1) != len(stats2):
        raise ValueError(
            """
*
* Arguments stats1 and stats2 have different list lengths.
* len(stats1) = {} != len(stats2) = {}
*
*""".format(
                len(stats1), len(stats2)
            )
        )

    # Search for duplicate pairs of statistics
    duplicates = []
    n = len(stats1)
    for i in range(n):
        for j in range(i + 1, n):
            # relative differences w.r.t. the first element of each pair
            diff1 = abs((stats1[i] - stats1[j]) / stats1[i])
            diff2 = abs((stats2[i] - stats2[j]) / stats2[i])
            if diff1 < threshold and diff2 < threshold:
                duplicates.append(
                    (i, j, (stats1[i], stats2[i]), (stats1[j], stats2[j]))
                )

    return duplicates
eb75d9d02a92cdb337dcbc100b282773543ac894
703,965
import re def _parse_uci_regression_dataset(name_str): """Parse name and seed for uci regression data. E.g. yacht_2 is the yacht dataset with seed 2. """ pattern_string = "(?P<name>[a-z]+)_(?P<seed>[0-9]+)" pattern = re.compile(pattern_string) matched = pattern.match(name_str) if matched: name = matched.group("name") seed = matched.group("seed") return name, seed return None, None
dd2158e1a5ceeba25a088b07ff8064e8016ae551
703,966
import re


def get_params(proto):
    """
    get the list of parameters from a function prototype
    example: proto = "int main (int argc, char ** argv)"
    returns: ['int argc', 'char ** argv']
    """
    # The trailing semicolon is optional so both "f(a);" and "f(a)" parse
    # (the original regex required ";", which contradicted the docstring
    # example).  Raw string avoids invalid-escape warnings.
    paramregex = re.compile(r'.*\((.*)\)\s*;?')
    a = paramregex.findall(proto)[0].split(', ')
    return a
37841b2503f53353fcbb881993e8b486c199ea58
703,967
import inspect


def list_module_public_functions(mod, excepted=()):
    """
    Build the list of all public functions of a module.

    Args:
        mod: Module to parse
        excepted: List of function names to not include. Default is none.

    Returns:
        List of public functions declared in this module
    """
    public = []
    for name, func in inspect.getmembers(mod, inspect.isfunction):
        # Skip private names, excluded names, and functions merely
        # imported into `mod` from elsewhere.
        if name.startswith('_') or name in excepted:
            continue
        if inspect.getmodule(func) == mod:
            public.append(func)
    return public
d27dc869cf12701bcb7d2406d60a51a8539a9e1b
703,968
def translate_marker_and_linestyle_to_Plotly_mode(marker, linestyle):
    """<marker> and <linestyle> are each one and only one of the valid options for each object."""
    has_marker = marker is not None
    has_line = linestyle != 'none'
    if has_marker and has_line:
        return 'lines+markers'
    if has_marker:
        return 'markers'
    # No marker: default to 'lines' whether or not a linestyle is set.
    return 'lines'
53de94176afe47f5a9b69e7ad676853b4b19a8db
703,969
def strRT(R, T):
    """Returns a string for a rotation/translation pair in a readable form.
    """
    # One bracketed row per line: the rotation row followed by the
    # matching translation component.
    rows = []
    for i in range(3):
        rows.append("[%6.3f %6.3f %6.3f %6.3f]\n" % (
            R[i,0], R[i,1], R[i,2], T[i]))
    return "".join(rows)
2d7ec1bf2ebd5a03472b7b6155ed43fdcc71f76a
703,971
def extract_classes(document):
    """
    document = "545,32 8:1 18:2"
    extract_classes(document) => returns "545,32"
    """
    # Only the first whitespace-separated token (the label field) matters.
    return document.split(maxsplit=1)[0]
b7e8fed3a60e3e1d51a067bef91367f960e34e6b
703,972
def general_value(value):
    """Checks if value is generally valid

    Returns: 200 if ok, 700 if ',' in value, 701 if '\\n' in value"""
    # Comma is checked before newline, matching the original precedence.
    for bad_char, code in ((',', 700), ('\n', 701)):
        if bad_char in value:
            return code
    return 200
5cf8388294cae31ca70ce528b38ca78cdfd85c2c
703,973
def _cast_to(matrix, dtype): """ Make a copy of the array as double precision floats or return the reference if it already is""" return matrix.astype(dtype) if matrix.dtype != dtype else matrix
9625311c0918ca71c679b1ac43abe67f2a4b0f2d
703,974
def reverse_str(string):
    """Return *string* reversed, recursively.

    Base case: strings of length <= 1 are their own reverse.  (The
    original base case was length == 1, so the empty string recursed
    forever — ""[1:] is "" — and raised RecursionError.)
    Recursive step: reverse of the tail followed by the head character.
    """
    if len(string) <= 1:
        return string
    return reverse_str(string[1:]) + string[0]
eb0d27816e8fe54f1136f4a507478f40a3354d72
703,975
def find_key_value_in_list(listing, key, value):
    """
    Look for the first dict in ``listing`` whose ``key`` maps to ``value``.

    :param listing: list of dicts to search
    :param key: key to look up in each dict
    :param value: value the key must map to
    :return: the first matching dict, or {} when there is no match
    """
    # Skip items that lack the key instead of raising KeyError — the
    # previous lambda indexed obj[key] directly and crashed on such items.
    dict_found = next(
        (item for item in listing if key in item and item[key] == value),
        None,
    )
    if dict_found:
        return dict_found
    else:
        return {}
642d8e43cbfbeef9bc014c85085c7027380156e2
703,976
def remove_duplicates(df, by=["full_text"]):
    """
    Remove duplicates from raw data file by specific columns and save
    results in file with name given.
    """
    # Keep the first occurrence of each duplicated group, drop the rest.
    keep_mask = ~df.duplicated(subset=by, keep="first")
    return df[keep_mask]
dfe99259a90280b346290dd2c880ab51e443e036
703,978
def tex_parenthesis(obj):
    """Return obj with parenthesis if there is a plus or minus sign."""
    text = str(obj)
    if "+" in text or "-" in text:
        return f"({text})"
    return text
356a3886d27d431e90de2a76e6590481ad85f05e
703,979
def swap_target_nonterminals(target):
    """
    Swap non-terminal tokens.

    :param target: List of target tokens
    :return: List of target tokens
    """
    # X_0 <-> X_1; every other token passes through unchanged.
    swap = {'X_0': 'X_1', 'X_1': 'X_0'}
    return [swap.get(token, token) for token in target]
56e91df1a513ee5dad1071337463e039ded57a86
703,980
import os
import stat


def is_executable_file(path):
    """Checks that path is an executable regular file (or a symlink to a
    file).

    This is roughly ``os.path isfile(path) and os.access(path, os.X_OK)``,
    but on some platforms :func:`os.access` gives us the wrong answer, so
    this checks permission bits directly.

    Note
    ----
    This function is taken from the pexpect module, see module doc-string
    for license.
    """
    # follow symlinks,
    fpath = os.path.realpath(path)

    # return False for non-files (directories, fifo, etc.)
    if not os.path.isfile(fpath):
        return False

    # Stat once and reuse the result (the original re-ran os.stat for
    # every check, and recomputed user_gids in a dead assignment).
    st = os.stat(fpath)
    mode = st.st_mode

    # On Solaris, etc., "If the process has appropriate privileges, an
    # implementation may indicate success for X_OK even if none of the
    # execute file permission bits are set."
    #
    # For this reason, it is necessary to explicitly check st_mode:
    # `other', that is anybody, may read and execute.
    if mode & stat.S_IROTH and mode & stat.S_IXOTH:
        return True

    # get current user's group ids, and check if `group',
    # when matching ours, may read and execute.
    user_gids = os.getgroups() + [os.getgid()]
    if (st.st_gid in user_gids and
            mode & stat.S_IRGRP and mode & stat.S_IXGRP):
        return True

    # finally, if file owner matches our effective userid,
    # check if `user', may read and execute.
    if (st.st_uid == os.geteuid() and
            mode & stat.S_IRUSR and mode & stat.S_IXUSR):
        return True

    return False
b9ec4bfa15d0a121ff4958b146d8e1646e6b15ed
703,981
def padZeros(numberString, numZeros, insertSide):
    """Return a string padded with zeros on the left or right side."""
    padding = '0' * numZeros
    if insertSide == 'left':
        return padding + numberString
    if insertSide == 'right':
        return numberString + padding
    # Any other insertSide falls through and returns None, as before.
d0c2d08a392e4792b13a64d076c8fb6aff1572cb
703,982
def readdirs(DIR):
    """Implementation of perl readdir in list context"""
    # DIR is a [entries, cursor] pair; return everything from the cursor
    # on and advance the cursor to the end (so a second call yields []).
    entries, cursor = DIR
    remaining = entries[cursor:]
    DIR[1] = len(entries)
    return remaining
98d9b588704ea2820b14ba2c5542ea0a619a02ce
703,983
def behav_data_inverted(df): """ Flips the dimensions that need inverting Faster than using is_inverted_dim """ # Apparently groupby with categorical dtype is broken # See https://github.com/pandas-dev/pandas/issues/22512#issuecomment-422422573 df["class_"] = df["class_"].astype(str) inverted_map = ( df[(df["morph_pos"] == 1)] .groupby(["subj", "morph_dim"], observed=True) .agg(lambda x: x.iloc[0])["class_"] == "R" ) df = df.join( inverted_map.to_frame(name="inverted"), on=("subj", "morph_dim"), how="left", sort=False, ) df["greater_response"] = (df["response"] == "R") != (df["inverted"]) return df
69ad0d4cea1a12b2dd8dc256c77b13f1002ae6b8
703,984
def _resample_event_obs(obs, fx, obs_data):
    """
    Resample the event observation.

    Parameters
    ----------
    obs : datamodel.Observation
        The Observation being resampled.
    fx : datamodel.EventForecast
        The corresponding Forecast.
    obs_data : pd.Series
        Timeseries data of the event observation.

    Returns
    -------
    obs_resampled : pandas.Series
        Timeseries data of the Observation resampled to match the Forecast.

    Raises
    ------
    ValueError
        If the Forecast and Observation do not have the same interval
        length.  (The docstring previously said RuntimeError, but the
        code raises ValueError.)
    """
    # Matching interval lengths means no actual resampling is needed:
    # the observation series is returned as-is.
    if fx.interval_length != obs.interval_length:
        raise ValueError("Event observation and forecast time-series "
                         "must have matching interval length.")
    else:
        obs_resampled = obs_data

    return obs_resampled
1c66ae124aaa2e732c7d0ec3e733ae2b5caaa6cb
703,985
def _arg_raw(dvi, delta):
    """Return *delta* without reading anything more from the dvi file"""
    # Identity "argument reader": the value is already encoded in *delta*,
    # so nothing is consumed from the stream.  `dvi` is unused here —
    # presumably accepted only to match the signature of the other
    # argument-reader helpers (confirm against their call sites).
    return delta
041cfaaf23c6e229b60d5278e8cf27352e078a65
703,986
def to_bytes(binary_string: str) -> bytes:
    """Change a string, like "00000011" to a bytestring

    :param str binary_string: The string
    :returns: The bytestring
    :rtype: bytes
    """
    remainder = len(binary_string) % 8
    if remainder != 0:
        # pad on the right out to a whole number of 8-bit bytes
        binary_string += "0" * (8 - remainder)
    assert set(binary_string) <= {"0", "1"}  # it really is a binary string
    assert len(binary_string) % 8 == 0  # it really is in 8-bit bytes
    num_bytes = len(binary_string) // 8
    return int(binary_string, 2).to_bytes(num_bytes, byteorder="big")
83dda243e27d7f7988d520c0455e43d1937d5447
703,987
def _read_tmpfd(fil): """Read from a temporary file object Call this method only when nothing more will be written to the temporary file - i.e., all the writing has already been done. """ fil.seek(0) return fil.read()
08648325e7e0e9bcd543d3238cb4630ac284f6ed
703,988
def evens(input):
    """
    Returns a list with only the even elements of data

    Example: evens([0, 1, 2, 3, 4]) returns [0,2,4]

    Parameter input: The data to process
    Precondition: input an iterable, each element an int
    """
    return [value for value in input if value % 2 == 0]
8a219f8815d95a18bea148eaae117f3356a77d4b
703,989
def _check_insert_data(obj, datatype, name): """ Checks validity of an object """ if obj is None: return False if not isinstance(obj, datatype): raise TypeError("{} must be {}; got {}".format( name, datatype.__name__, type(obj).__name__)) return True
057d0124db3f304e7efd4093510c663f5383af63
703,990
import os


def dir_exists(foldername):
    """ Return True if folder exists, else False """
    is_dir = os.path.isdir(foldername)
    return is_dir
edf3bc0dcdb16e816f48134ede420b758aa53d16
703,991
def piece_not(piece: str) -> str:
    """ helper function to return the other game piece that is not the current game piece

    Preconditions:
        - piece in {'x', 'o'}

    >>> piece_not('x')
    'o'
    >>> piece_not('o')
    'x'
    """
    if piece == 'o':
        return 'x'
    return 'o'
18bb3b45accf98d4f914e3f50372c4c083c1db4d
703,992
def argsum(*args):
    """sum of all arguments"""
    total = 0
    for value in args:
        total += value
    return total
2445ef4f3fc321b3eae1997a8c44c628cd72d70a
703,993