Columns: content (string, 39 to 14.9k chars), sha1 (string, 40 chars), id (int64, 0 to 710k)
import requests


def get_api_data(session, url, date_time):
    """Get the JSON-formatted response from the AMM API for the desired date-time."""
    session = session or requests.Session()
    return session.get(url, params={"dt": date_time.format("DD/MM/YYYY")}).json()
ff44be4a958c4f05cc5bd562854059c552f693e1
8,518
def trunc(s, length):
    """Truncate a string to a given length.

    The string is truncated by keeping only the first (length - 4) characters
    and replacing the rest with ' ...'
    """
    if s and len(s) > length:
        return s[:length - 4] + ' ...'
    return s or ''
ea6d1702d709ac7d94cc2bcb2e945da71009c0fe
8,520
import operator


def sort_values(values: list[tuple]) -> list[tuple]:
    """Returns a list of tuples sorted by x value (the value at index 0 of the inner tuple)."""
    return sorted(values, key=operator.itemgetter(0))
f172f68418988d4e01dcc406de8ec467abfe7aa8
8,521
def expose(class_method):
    """
    Decorator which exposes given method into interface

    :param class_method: method to expose
    :return: given method with modifications
    """
    class_method.is_exposed = True
    return class_method
ee234bd7535f29c39fc80643997b89aeb3c0f533
8,524
def func(pseudo_state, a1, a2, b1, b2, c1, c2, d1, d2):
    """
    quadratic fit function for the Bellman value at given pseudo-state

    :param pseudo_state: list(float) - list of the four state variables for a given state
    :param a1, a2, ... d2: float - parameters of the quadratic fit function
    """
    total = a1 * pseudo_state[0]**2 + a2 * pseudo_state[0]
    total += b1 * pseudo_state[1]**2 + b2 * pseudo_state[1]
    total += c1 * pseudo_state[2]**2 + c2 * pseudo_state[2]
    total += d1 * pseudo_state[3]**2 + d2 * pseudo_state[3]
    return total
6478219704999dc4cfcbc915126d919e15fe3043
8,526
def intersection(box1, box2):
    """
    Args:
        box1: bounding box
        box2: bounding box

    Returns:
        float: the area of the intersection of the two boxes
    """
    y_min1, x_min1, y_max1, x_max1 = box1
    y_min2, x_min2, y_max2, x_max2 = box2
    min_ymax = min(y_max1, y_max2)
    max_ymin = max(y_min1, y_min2)
    intersect_heights = max(0, min_ymax - max_ymin)
    min_xmax = min(x_max1, x_max2)
    max_xmin = max(x_min1, x_min2)
    intersect_widths = max(0, min_xmax - max_xmin)
    return intersect_heights * intersect_widths
71746d93ead54aa5b36e7e6a5eb40e757711bef5
8,527
def calculateHeaders(tokens: list, rawHeaders: tuple) -> tuple:
    """
    Takes sanitised, tokenised URCL code and the rawHeaders.
    Calculates the new optimised header values, then returns them.
    """
    BITS = rawHeaders[0]
    bitsOperator = rawHeaders[1]
    MINREG = 0
    MINHEAP = rawHeaders[3]
    MINSTACK = rawHeaders[4]
    RUN = rawHeaders[5]

    for line in tokens:
        for token in line:
            if token.startswith("R"):
                if token[1:].isnumeric():
                    number = int(token[1:])
                    if number > MINREG:
                        MINREG = number

    headers = (BITS, bitsOperator, MINREG, MINHEAP, MINSTACK, RUN)
    return headers
472eeb4397d68e232b66517064f91e5688c33e3c
8,531
from datetime import datetime


def month_counter(fm, LAST_DAY_OF_TRAIN_PRD=(2015, 10, 31)):
    """Calculate number of months (i.e. month boundaries) between the first
    month of train period and the end month of validation period.

    Parameters:
    -----------
    fm : datetime
        First day of first month of train period

    Returns:
    --------
    Number of months between first month of train period and end month of
    validation period
    """
    return (
        (datetime(*LAST_DAY_OF_TRAIN_PRD).year - fm.year) * 12
        + datetime(*LAST_DAY_OF_TRAIN_PRD).month
        - fm.month
    )
e10e6be0eb8a7762b182d073ca85ed1b97f831d3
8,535
def to_unicode(obj): """Convert object to unicode""" if isinstance(obj, bytes): return obj.decode('utf-8', 'ignore') return str(obj)
e54c02e04109b8a99a7eb4e357e95ead89166137
8,536
def get_shape(x, unknown_dim_size=1):
    """
    Extract shape from onnxruntime input.
    Replace unknown dimension by default with 1.

    Parameters
    ----------
    x: onnxruntime.capi.onnxruntime_pybind11_state.NodeArg
    unknown_dim_size: int
        Default: 1
    """
    shape = x.shape
    # replace unknown dimensions by default with 1
    shape = [i if isinstance(i, int) else unknown_dim_size for i in shape]
    return shape
1c719191922a46b948fb567273e3a5152769e190
8,539
def word_tally(word_list):
    """
    Compiles a dictionary of words. Keys are the word, values are the number
    of occurrences of this word in the page.

    :param word_list: list
        List of words
    :return: dictionary
        Dict of words: total
    """
    word_dict = {}
    for word in word_list:
        if not word_dict.get(word):
            word_dict[word] = 1
        else:
            word_dict[word] += 1
    return word_dict
5ab1f7ac4c8a72cd5ceda2a391cef8a62a1ec34f
8,540
import re


def fix_extension(filename_end):
    """Processes ending section of filename to get extension

    Args:
        filename_end (str): ending section of filename

    Returns:
        str: file extension
    """
    pattern = re.compile(r".*\.(\w{3})$", flags=re.IGNORECASE)
    match = pattern.search(filename_end)
    if match is None:
        raise ValueError("no three-letter extension found in %r" % filename_end)
    return match.group(1)
5317c3c52920d669374ac72cc6cccc70a2740174
8,548
def parse_map_align_stdout(stdout):
    """Parse the stdout of map_align and extract the alignment of residues.

    Parameters
    ----------
    stdout : str
        Standard output created with map_align

    Returns
    -------
    dict
        A dictionary where aligned residue numbers in map_b are the keys and
        residue numbers in map_a values. Only misaligned regions are included.
    """
    alignment_dict = {}
    for line in stdout.split('\n'):
        if line and line.split()[0] == "MAX":
            line = line.strip().split()
            for residue_pair in line[8:]:
                residue_pair = residue_pair.split(":")
                if residue_pair[0] != residue_pair[1]:
                    alignment_dict[int(residue_pair[1])] = int(residue_pair[0])
    return alignment_dict
4cb699ffbb817e80402af22b08240323012919f8
8,551
import struct import socket def ip_to_ascii(ip_address): """ Converts the quad IP format to an integer representation. """ return struct.unpack('!L', socket.inet_aton(ip_address))[0]
2cb3ccbe70eed2dd2e8ac21d10e180805dec95ea
8,555
from typing import Any from typing import Sequence def complete_ports(_: Any, __: Any, incomplete: str) -> Sequence[str]: """Returns common ports for completion.""" return [k for k in ('80', '443', '8080') if k.startswith(incomplete)]
694306ae57bcfd21d6fa73a595768dde0ffba86a
8,557
def coord_arg_to_coord(carg):
    """
    Parameters
    ----------
    carg : str
        Argument from parser for coordinates
        Eligible formats are like:
            J081240.7+320809
            122.223,-23.2322
            07:45:00.47,34:17:31.1

    Returns
    -------
    icoord : str or tuple
        Allowed format for coord input to linetools.utils.radec_to_coord
    """
    if ',' in carg:
        radec = carg.split(',')
        if ':' in radec[0]:  # 07:45:00.47,34:17:31.1
            icoord = (radec[0], radec[1])
        else:  # 122.223,-23.2322
            icoord = (float(radec[0]), float(radec[1]))
    else:  # J081240.7+320809
        icoord = carg
    # Return
    return icoord
16a6cb8090dc040b7b5f6a1a4ba873ea65e0dfdf
8,561
def all_success(mgmt_commands): """Determines if all child processes were successful. Args: mgmt_commands : A list of all Command objects Returns: True if all child processes succeeded """ for mgmt_command in mgmt_commands: if mgmt_command.retcode != 0: return False return True
1bc0d32491711e0d20106f1f294093b30e77bd55
8,566
import re


def get_nameservice(hdfs_site):
    """
    Multiple nameservices can be configured, for example to support seamless
    distcp between two HA clusters. The nameservices are defined as a
    comma-separated list in hdfs_site['dfs.nameservices']. The parameter
    hdfs_site['dfs.internal.nameservices'] was introduced in Hadoop 2.6 to
    denote the nameservice for the current cluster (HDFS-6376).

    This method uses hdfs_site['dfs.internal.nameservices'] to get the current
    nameservice; if that parameter is not available it tries to split the value
    in hdfs_site['dfs.nameservices'], returning the first string or what is
    contained in hdfs_site['dfs.namenode.shared.edits.dir'].
    By default hdfs_site['dfs.nameservices'] is returned.

    :param hdfs_site:
    :return: string or empty
    """
    name_service = hdfs_site.get('dfs.internal.nameservices', None)
    if not name_service:
        name_service = hdfs_site.get('dfs.nameservices', None)
        if name_service:
            for ns in name_service.split(","):
                if 'dfs.namenode.shared.edits.dir' in hdfs_site and \
                        re.match(r'.*%s$' % ns, hdfs_site['dfs.namenode.shared.edits.dir']):
                    # better would be core_site['fs.defaultFS'] but it's not available
                    return ns
            return name_service.split(",")[0]  # default to return the first nameservice
    return name_service
65a86316112a94b6f361daea88cd5658d8019668
8,567
def checkChanList(chanprof, profile, chanList):
    """
    Return non-zero value if any element of chanlist is not in the channel
    list of profile
    """
    for c in chanList:
        if c not in chanprof[profile - 1]:
            return 1
    return 0
face301b61634bcff8721fcafb1cbc09e2bd0e5f
8,568
def get_copysetup(copytools, copytool_name):
    """
    Return the copysetup for the given copytool.

    :param copytools: copytools list from infosys.
    :param copytool_name: name of copytool (string).
    :return: copysetup (string).
    """
    copysetup = ""
    for ct in copytools.keys():
        if copytool_name == ct:
            copysetup = copytools[ct].get('setup')
            break
    return copysetup
1181352a178f954d17cf7d7b8fc6c798b441d4a6
8,574
def last_occurence_of_tag_chain(sequence, tag_type):
    """
    Takes a sequence of tags. Assuming the first N tags of the sequence are
    all of the type tag_type, it will return the index of the last such tag
    in that chain, i.e. N-1.

    If the first element of the sequence is not of type tag_type, it will
    return -1.
    """
    for i in range(0, len(sequence)):
        if sequence[i][1] != tag_type:
            return i - 1
    return len(sequence)
0edcf25e18ff4b3701f92c13bab2b634b738c158
8,575
def cal_fdp_power(selected, non_zero_index, r_index=False):
    """ Calculate power and False Discovery Proportion

    Parameters
    ----------
    selected: list
        index (in R format) of selected non-null variables
    non_zero_index: true index of non-null variables
    r_index : True if the index is taken from rpy2 inference

    Returns
    -------
    fdp: False Discoveries Proportion
    power: percentage of correctly selected variables over total number of
        non-null variables
    """
    # selected is the index list in R and will be different from index of
    # python by 1 unit
    if selected.size == 0:
        return 0.0, 0.0

    if r_index:
        selected = selected - 1

    true_positive = [i for i in selected if i in non_zero_index]
    false_positive = [i for i in selected if i not in non_zero_index]
    fdp = len(false_positive) / max(1, len(selected))
    power = len(true_positive) / len(non_zero_index)

    return fdp, power
5e14b19c95ec0bc465c6ea6c98606768f00ee49e
8,580
def _create_types_map(sqltypes):
    """Take a types module and, utilising the `dir` function, create a mapping
    from the string value of that attribute to the SQLAlchemy type instance.
    """
    sql_types_map = {
        item.lower(): getattr(sqltypes, item)
        for item in dir(sqltypes)
        if item[0].isupper()
    }
    return sql_types_map
69a3902663eadc70050acc0c1869fcb86a2a6384
8,581
def find_author(name, authors):
    """ Find the author with the provided name, in the list of authors"""
    for author in authors:
        if author.name == name:
            return author
    raise ValueError('Author %s not found.' % name)
dd47f0ecf8574d68a0ce9b5e94dff19b58f5887a
8,584
import hashlib


def md5_key(chrom, start, end, ref, alt, assembly):
    """Generate a md5 key representing uniquely the variant

    Accepts:
        chrom(str): chromosome
        start(int): variant start
        end(int): variant end
        ref(str): reference bases
        alt(str): alternative bases
        assembly(str): genome assembly (GRCh37 or GRCh38)

    Returns:
        md5_key(str): md5 unique key
    """
    hash = hashlib.md5()
    hash.update(
        (" ".join([chrom, str(start), str(end), str(ref), str(alt), assembly])).encode("utf-8")
    )
    md5_key = hash.hexdigest()
    return md5_key
64db55c0075d063aeec500f97700ec769840cc4f
8,589
import base64


def base64_encode_nifti(image):
    """Returns base64 encoded string of the specified image.

    Parameters
    ----------
    image : nibabel.Nifti2Image
        image to be encoded.

    Returns
    -------
    str
        base64 encoded string of the image.
    """
    encoded_image = base64.encodebytes(image.to_bytes())
    enc = encoded_image.decode("utf-8")
    return enc
5e3758089240d8840c1cb1828ab908dead3b4b7c
8,596
from pathlib import Path
import pickle


def load_trained_model(fname: str):
    """
    Loads a saved ModelData object from file.

    Args:
        fname (str): Complete path to the file.

    Returns:
        A ModelData object containing the model, list of titles, list of
        authors, list of genres, list of summaries, training corpus, and
        test corpus.
    """
    fp = Path(fname)
    with fp.open("rb") as f:
        model_data = pickle.load(f)
    return model_data
3bdfa3f090fa5efcd54b17bd47a0cd4ea57e1c4a
8,600
def icosahedron_nodes_calculator(order):
    """Calculate the number of nodes corresponding to the order of an
    icosahedron graph.

    Args:
        order (int): order of an icosahedron graph

    Returns:
        int: number of nodes in icosahedron sampling for that order
    """
    nodes = 10 * (4 ** order) + 2
    return nodes
eccea98ec5da3fae748c3505af4689dfe6f47b73
8,604
import click


def validate_profile(context, param, value):
    """
    Validates existence of profile.

    Returns the profile name if it exists; otherwise throws BadParameter
    """
    if value in context.obj.configuration.profiles():
        return value
    else:
        raise click.BadParameter("\"%s\" was not found" % value)
ac1fd3caa99a510173aa96f4c781abfacb6eed97
8,607
def merge_configs(*configs):
    """
    Merges dictionaries of dictionaries, by combining top-level dictionaries
    with last value taking precedence.

    For example:

    >>> merge_configs({'a': {'b': 1, 'c': 2}}, {'a': {'b': 2, 'd': 3}})
    {'a': {'b': 2, 'c': 2, 'd': 3}}
    """
    merged_config = {}
    for config in configs:
        for k, v in config.items():
            if k in merged_config:
                merged_config[k].update(v)
            else:
                merged_config[k] = v.copy()
    return merged_config
dbbdff74695233b522cd4381a78ca82e6f8057fd
8,611
def label_tsne(tsne_results, sample_names, tool_label):
    """
    Label tSNE results.

    Parameters
    ----------
    tsne_results : np.array
        Output from run_tsne.
    sample_names : list
        List of sample names.
    tool_label : str
        The tool name to use for adding labels.

    Returns
    -------
    dict
        Dictionary of the form: {<sample_name>: <coordinate>}.
    """
    tsne_labeled = {
        sample_names[i]: {
            f'{tool_label}_x': float(tsne_results[i][0]),
            f'{tool_label}_y': float(tsne_results[i][1]),
        }
        for i in range(len(sample_names))
    }
    return tsne_labeled
50422e192e57fb6a019618d46e9a95b9b3c3c768
8,613
from typing import List


def get_routes(vehicles: List[int], stops: List[int]):
    """
    Create dict of vehicles (key) and their routes (value).

    :vehicles: list of vehicle identities (same order as demand)
    :stops: list of stop numbers (same order as vehicles)

    return dict
    """
    counts = {}
    for vehicle in vehicles:
        if vehicle in counts:
            counts[vehicle] += 1
        else:
            counts[vehicle] = 1

    routes = {}
    for i, vehicle in enumerate(vehicles):
        if vehicle not in routes:
            routes[vehicle] = [None for j in range(counts[vehicle])]
        routes[vehicle][int(stops[i]) - 1] = i
    return routes
966baf998c0ec22ad381175a5680b4cefd045a6f
8,616
def progress_bar(progress, size=20):
    """
    Returns an ASCII progress bar.

    :param progress: A floating point number between 0 and 1 representing the
        progress that has been completed already.
    :param size: The width of the bar.

    .. code-block:: python

        >>> ui.progress_bar(0.5, 10)
        '[####      ]'
    """
    size -= 2
    if size <= 0:
        raise ValueError("size not big enough.")
    if progress < 0:
        return "[" + "?" * size + "]"
    else:
        progress = min(progress, 1)
        done = int(round(size * progress))
        return "[" + "#" * done + " " * (size - done) + "]"
ab3bffd9e2c9c0060001a3058217690e8d30a67d
8,618
def density(temp): """ Calculating density of water due to given temperature (Eq. 3.11) :param temp: temperature prediction Y[d, t] at depth d and time t :return: corresponding density prediction """ return 1000 * (1 - ((temp + 288.9414) * (temp - 3.9863) ** 2) / (508929.2 * (temp + 68.12963)))
92d6d7c5639e03790715f62a1027a15357cdf1cf
8,624
import torch


def distance2bbox(points, distance, max_shape=None):
    """Decode distance prediction to bounding box.

    Args:
        points (Tensor): Shape (n, 3), [t, x, y].
        distance (Tensor): Distance from the given point to 4 boundaries
            (left, top, right, bottom, frDis, 4point, bkDis, 4point).
        max_shape (list): Shape of the image.

    Returns:
        Tensor: Decoded bboxes.
    """
    mid_t = points[:, 0]
    mid_x1 = points[:, 1] - distance[:, 0]
    mid_y1 = points[:, 2] - distance[:, 1]
    mid_x2 = points[:, 1] + distance[:, 2]
    mid_y2 = points[:, 2] + distance[:, 3]

    fr_t = points[:, 0] + distance[:, 4]
    fr_x1 = mid_x1 + distance[:, 5]
    fr_y1 = mid_y1 + distance[:, 6]
    fr_x2 = mid_x2 + distance[:, 7]
    fr_y2 = mid_y2 + distance[:, 8]

    bk_t = points[:, 0] - distance[:, 9]
    bk_x1 = mid_x1 + distance[:, 10]
    bk_y1 = mid_y1 + distance[:, 11]
    bk_x2 = mid_x2 + distance[:, 12]
    bk_y2 = mid_y2 + distance[:, 13]

    if max_shape is not None:
        mid_x1 = mid_x1.clamp(min=0, max=max_shape[2])
        mid_y1 = mid_y1.clamp(min=0, max=max_shape[1])
        mid_x2 = mid_x2.clamp(min=0, max=max_shape[2])
        mid_y2 = mid_y2.clamp(min=0, max=max_shape[1])
        fr_t = fr_t.clamp(min=0, max=max_shape[0])
        fr_x1 = fr_x1.clamp(min=0, max=max_shape[2])
        fr_y1 = fr_y1.clamp(min=0, max=max_shape[1])
        fr_x2 = fr_x2.clamp(min=0, max=max_shape[2])
        fr_y2 = fr_y2.clamp(min=0, max=max_shape[1])
        bk_t = bk_t.clamp(min=0, max=max_shape[0])
        bk_x1 = bk_x1.clamp(min=0, max=max_shape[2])
        bk_y1 = bk_y1.clamp(min=0, max=max_shape[1])
        bk_x2 = bk_x2.clamp(min=0, max=max_shape[2])
        bk_y2 = bk_y2.clamp(min=0, max=max_shape[1])

    return torch.stack([mid_t, mid_x1, mid_y1, mid_x2, mid_y2,
                        fr_t, fr_x1, fr_y1, fr_x2, fr_y2,
                        bk_t, bk_x1, bk_y1, bk_x2, bk_y2], -1)
ea773f3bd0d53a2aaccb85c7b7042c51c3dd0653
8,625
from typing import Tuple


def _partition(lst: list, pivot: object) -> Tuple[list, list]:
    """Return a partition of <lst> with the chosen pivot.

    Return two lists, where the first contains the items in <lst>
    that are <= pivot, and the second is the items in <lst> that are > pivot.
    """
    smaller = []
    bigger = []
    for item in lst:
        if item <= pivot:
            smaller.append(item)
        else:
            bigger.append(item)
    return smaller, bigger
07950d665eca6b5591d3c8b65a980c2597b9e45a
8,627
def split_arguments(args, splitter_name=None, splitter_index=None):
    """Split list of args into (other, split_args, other) between splitter_name/index and `--`

    :param args: list of all arguments
    :type args: list of str
    :param splitter_name: optional argument used to split out specific args
    :type splitter_name: str
    :param splitter_index: specific index at which to split
    :type splitter_index: int

    :returns: tuple (other, split_args)
    """
    if splitter_index is None:
        if splitter_name not in args:
            return args, []
        splitter_index = args.index(splitter_name)
    start_index = splitter_index + 1
    end_index = args.index('--', start_index) if '--' in args[start_index:] else None

    if end_index:
        return (
            args[0:splitter_index],
            args[start_index:end_index],
            args[(end_index + 1):]
        )
    else:
        return (
            args[0:splitter_index],
            args[start_index:],
            []
        )
c6e800ff6d109699d346c76052a70e4e5ab670d8
8,628
import re


def is_valid_record2(parsed_record):
    """Check if parsed_record is properly formatted"""
    if not (
        "byr" in parsed_record
        and parsed_record["byr"].isdigit()
        and len(parsed_record["byr"]) == 4
        and (1920 <= int(parsed_record["byr"]) <= 2002)
    ):
        return False
    if not (
        "iyr" in parsed_record
        and parsed_record["iyr"].isdigit()
        and len(parsed_record["iyr"]) == 4
        and (2010 <= int(parsed_record["iyr"]) <= 2020)
    ):
        return False
    if not (
        "eyr" in parsed_record
        and parsed_record["eyr"].isdigit()
        and len(parsed_record["eyr"]) == 4
        and (2020 <= int(parsed_record["eyr"]) <= 2030)
    ):
        return False
    if "hgt" in parsed_record:
        match = re.match(r"(?P<value>\d+)(?P<unit>in|cm)$", parsed_record["hgt"])
        if not match:
            return False
        value = int(match.group("value"))
        unit = match.group("unit")
        if not (
            (unit == "cm" and 150 <= value <= 193)
            or (unit == "in" and 59 <= value <= 76)
        ):
            return False
    else:
        return False
    if not (
        "hcl" in parsed_record
        and re.match(r"#[0-9a-f]{6}$", parsed_record["hcl"])
    ):
        return False
    if not (
        "ecl" in parsed_record
        and re.match(r"amb|blu|brn|gry|grn|hzl|oth$", parsed_record["ecl"])
    ):
        return False
    if not ("pid" in parsed_record and re.match(r"\d{9}$", parsed_record["pid"])):
        return False
    return True
d3fdb17f6c6726e74e02f41813c665e8be223406
8,630
def lua_property(name):
    """ Decorator for marking methods that make attributes available to Lua """
    def decorator(meth):
        def setter(method):
            meth._setter_method = method.__name__
            return method
        meth._is_lua_property = True
        meth._name = name
        meth.lua_setter = setter
        return meth
    return decorator
97cd57cf21c4afdb43b6504af56139228df751cd
8,632
def ip2int(ip_str):
    """
    Convert XXX.XXX.XXX.XXX to integer representation

    Args:
        ip_str (str): IP in a XXX.XXX.XXX.XXX string format

    Returns:
        ip_int (int): Integer IP representation
    """
    ip_int = None
    if isinstance(ip_str, str):
        # clean IP
        if ip_str.find(':') > 0:
            ip_str = ip_str[:ip_str.index(':')]
        octet = [int(x.strip()) for x in ip_str.split('.')]
        ip_int = octet[0] * pow(256, 3) + octet[1] * pow(256, 2) + octet[2] * 256 + octet[3]
    return ip_int
bb48f28519593222c005df1b009d7e669dde7669
8,634
def calculate_mixing_param_constants(asm_obj):
    """Calculate the constants Cs and Cm required for the determination
    of the Cheng-Todreas mixing parameters

    Parameters
    ----------
    asm_obj : DASSH Assembly object
        Contains the geometry and flow parameters

    Returns
    -------
    tuple
        tuple of two dicts, each containing the laminar and turbulent
        constants for the calculation of eddy diffusivity and the swirl
        velocity for the assembly

    Notes
    -----
    Implemented as a separate method so that it can be tested against
    the results in Tables 1 and 2 of the Cheng-Todreas 1986 paper.
    """
    try:
        c_over_d = (asm_obj.d['pin-pin'] / asm_obj.pin_diameter)**-0.5
    except ZeroDivisionError:  # single pin, d['pin-pin'] = 0
        c_over_d = 0.0
    h_over_d = (asm_obj.wire_pitch / asm_obj.pin_diameter)**0.3
    cm = {}
    cs = {}
    if asm_obj.n_pin >= 19:
        # Laminar
        cm['laminar'] = 0.077 * c_over_d
        cs['laminar'] = 0.413 * h_over_d
        # Turbulent
        cm['turbulent'] = 0.14 * c_over_d
        cs['turbulent'] = 0.75 * h_over_d
    else:
        # Laminar
        cm['laminar'] = 0.055 * c_over_d
        cs['laminar'] = 0.33 * h_over_d
        # Turbulent
        cm['turbulent'] = 0.1 * c_over_d
        cs['turbulent'] = 0.6 * h_over_d
    return cm, cs
336aa6de073fa9eece218deef0d236f47c7fea79
8,639
def cast_str_to_bool(input_string: str) -> bool:
    """Convert string to boolean with special handling for case, "True", 1.

    Special string parsing for booleans.

    Args:
        input_string (str): Evaluate this string as bool.

    Returns:
        case-insensitive 'True', '1' or '1.0' is True. It will be False for
        all other strings.
    """
    return input_string.lower() in ['true', '1', '1.0']
38898f9aaa14ce9d6872941252213431c07878d1
8,640
def phi31_v_from_i_skow(phi31_i):
    """
    Calculates the V band phi31 for the given I band phi31 using the
    relationship found in Skowron et al. (2016), equation (6).

    Parameters
    ----------
    phi31_i : float64
        The I band phi31 of the star.

    Returns
    -------
    phi31_v : float64
        The calculated V band phi31 of the star.
    """
    return 0.122 * phi31_i ** 2 - 0.750 * phi31_i + 5.331
9594e7676bf844908e6555c4064aa795272e529d
8,641
def get_object_type(objects: list, types: list) -> list: """Get the object specified. Args: objects: a list of objects. types: a list of the types. Returns: A list of a certain type. """ return [item for item in objects if item.get('type') in types]
bfccc9c838f0a2d3294068acc0d2091c44de4798
8,642
import time def unix() -> int: """ Return the current time in seconds since the Epoch. Fractions of a second may be present if the system clock provides them. Returns: int """ return int(time.time())
3e5a0933d9a9eaee7c9f4136f651af6f2982dacc
8,643
def surface_carre(arete):
    """
    Function that computes and returns the surface area of a square.

    :param arete: the length of one of the sides of the square
    :type arete: int or float
    :return: the surface area of the square, in the (squared) unit of arete
    :rtype: int or float
    """
    return arete * arete
fecc5ceae98a3549ccc79237cf94423e166a4429
8,648
def is_service(hass, entry): """check whether config entry is a service""" domain, service = entry.split(".")[0], ".".join(entry.split(".")[1:]) return hass.services.has_service(domain, service)
d244dc15be20a7e56a17695dbf7df4c1811650a5
8,657
def any_in(collection, values): """ Check if any of a collection of values is in `collection`. Returns boolean. """ for value in values: if value in collection: return True return False
a8d4471940e96d2b6a307c8ccf48caaaecb10f98
8,658
def _make_unique(l): """Check that all values in list are unique and return a pruned and sorted list.""" return sorted(set(l))
18ae627f2a8f5dc8c61a332b73d8bd99c41d5ced
8,669
import yaml def parse_config_file(config_file) -> dict: """Read config.yaml file with params. Returns ------- dict Dict of config """ with open(config_file, 'r') as stream: try: CONFIG = yaml.safe_load(stream) except yaml.YAMLError as exc: print(exc) exit(1) return CONFIG
8f1fb9bcda94ef5c21edbf5e5bf95b327efd8c96
8,673
def collision(object1, object2):
    """detect collision between two objects using circles

    Tests for collision between two objects by testing whether two circles
    centered on the objects overlap. Objects must have a "radius" attribute,
    which is used to create the circle.

    Args:
        object1: First object for collision detection
        object2: Second object for collision detection

    Returns:
        bool: True if objects' circles (defined by their radius) overlap
    """
    x_distance = object1.x - object2.x
    y_distance = object1.y - object2.y
    distance_squared = x_distance ** 2 + y_distance ** 2
    try:
        return distance_squared <= \
            (object1.fire_radius + object2.fire_radius) ** 2
    except AttributeError:
        return distance_squared <= (object1.radius + object2.radius) ** 2
e42bcce7a111fa7f8de2c10b16a91c0d49992ddb
8,676
def find_key_symptom(tariffs, cause_reduction, cause, endorsements, rules=None):
    """Find the key endorsed symptom for a cause

    Args:
        tariffs (dict): processed tariff matrix
        cause_reduction (dict): mapping from cause46 to cause34
        cause (int): cause number at the cause34 level
        endorsements (iterable): names of endorsed symptoms
        rules (dict): mapping of rule-based cause prediction to key symptom

    Returns:
        symptom (str): name of the key symptom
    """
    rules = rules or {}
    rule_symp = rules.get(cause)
    if rule_symp:
        return rule_symp

    causes46s = [cause46 for cause46, cause34 in cause_reduction.items()
                 if cause34 == cause]
    values = {}
    for cause46 in causes46s:
        for symptom, tariff in tariffs[cause46]:
            if symptom not in endorsements or tariff <= 0:
                continue
            if symptom in values and values[symptom] < tariff:
                continue
            else:
                values[symptom] = tariff

    if values:
        return sorted(values.items(), key=lambda x: x[1])[-1][0]
e8805fd29bf09cd3e0269ae4203f4fd7912f5c72
8,678
def transform(tokens): """ Accumulate tokens in lines. Add token (and white spaces) to a line until it overflow 80 chars. """ lines = [] current_line = [] for t in tokens: if sum([len(x) + 1 for x in current_line]) + len(t) > 80: lines.append(current_line) current_line = [] current_line.append(t) if current_line: lines.append(current_line) return lines
c30af21db61b2b00848b0263552461f9682a6d08
8,680
import json def ocr_loader(json_path): """Helper function to load ocr data from json file Args: json_path (string): Path to the json file with OCR output data Returns: string: OCR text output """ json_path = json_path.replace('\\', '/') with open(json_path, "r") as file: loaded_json = json.load(file) if type(loaded_json) is list: result = loaded_json else: result = loaded_json['text'] return " ".join(result)
7e182b184b305bffc97dadf59b139a1aa53250b1
8,681
def to_ps(obj, parlen=False): """Converts object into postscript literal >>> to_ps(None) 'null' >>> to_ps(123) '123' >>> to_ps(456.78) '456.78' >>> to_ps(True), to_ps(False) ('true', 'false') >>> to_ps('foo bar baz') 'foo bar baz' >>> to_ps('foo bar baz', parlen=True) '(foo bar baz)' """ if isinstance(obj, str): ret = '%s' % obj if parlen: ret = '(%s)' % ret elif isinstance(obj, bool): ret = {True: 'true', False: 'false'}[obj] elif isinstance(obj, type(None)): ret = 'null' else: ret = str(obj) return ret
11fa2888678970f9ab37e4827e87bbf67856898c
8,682
def enum_member_name(state): """ All enum member names have the form <EnumClassName>.<EnumMemberName>. For our rendering we only want the member name, so we take their representation and split it. """ return str(state).split('.', 1)[1]
d6fa9320c1f96209fd6d547f1a7715ade391c672
8,683
def request_path( request ): """ Get the path of the request """ return str(request.path)
506672b635b6196aa032c7bed5b740c5a8d70c79
8,689
import base64 import hashlib def _sub_hash_password(password): """ Hash long password to allow bcrypt to handle password longer than 72 characters. :param password: password to hash. :return: (String) The hashed password. """ # bcrypt only handles passwords up to 72 characters. # We use this hashing method as a work around. # Suggested in bcrypt PyPI page (2018/02/08 12:36 EST): # https://pypi.python.org/pypi/bcrypt/3.1.0 return base64.b64encode(hashlib.sha256(password.encode("utf-8")).digest())
73498949c1e29712192d9379d14cf816d095a01a
8,691
from colorsys import rgb_to_hsv


def from_rgb_to_paletton_hue(rgb, paletton):
    """
    >>> p = Paletton()
    >>> print(from_rgb_to_paletton_hue((120, 0, 106), p))
    318
    """
    rhs_hue = round(rgb_to_hsv(*rgb)[0] * 360)
    if rhs_hue not in paletton.HUE_OFFSETS:
        keys = sorted(paletton.HUE_OFFSETS.keys())
        closest_offset_index = sorted(keys + [rhs_hue]).index(rhs_hue)
        rhs_hue = keys[closest_offset_index - 1]
    return paletton.HUE_OFFSETS[rhs_hue]
c76f257f46fc9c84d830c2f006f1af8155cbb38f
8,693
def iobes2iob(iobes): """Converts a list of IOBES tags to IOB scheme.""" dico = {pfx: pfx for pfx in "IOB"} dico.update({"S": "B", "E": "I"}) return [dico[t[0]] + t[1:] if not t == "O" else "O" for t in iobes]
3baed417dfccf25ddf5f0bdae686a01fa8bfda95
8,695
def getFromLongestMatchingKey(object, listOfKeys, caseInsensitive=True):
    """
    Function to take an object and a list of keys and return the value of the
    longest matching key or None if no key matches.

    :param object: The object with the keys.
    :type object: dict

    :param listOfKeys: A list of keys to try to match
    :type listOfKeys: list of string keys

    :param caseInsensitive: Case insensitive key matching?
    :type caseInsensitive: boolean

    :returns: value of longest matching key in object
    """
    listOfKeys = listOfKeys.copy()
    if caseInsensitive:
        object = {k.lower(): v for k, v in object.items()}
        listOfKeys = [k.lower() for k in listOfKeys]
    key = max(
        [str(k) for k in listOfKeys],
        key=len
    ) if len(listOfKeys) else None
    if key and key in listOfKeys:
        listOfKeys.remove(key)
    return (
        object.get(
            key,
            getFromLongestMatchingKey(object, listOfKeys)
        ) if key else None
    )
25271697197c5c16c2ad5ae7320fc452bc3c8205
8,696
def key_type(key):
    """String identifying if the key is a 'name' or an 'ID', or '' for None.

    This is most useful when paired with key_id_or_name_as_string.

    Args:
        key: A datastore Key

    Returns:
        The type of the leaf identifier of the Key, 'ID', 'name', or ''.
    """
    if key.id():
        return 'ID'
    elif key.name():
        return 'name'
    else:
        return ''
8d055fc97313b7f613e5927d0f8f38d060a2cb2b
8,698
def other_classes(nb_classes, class_ind):
    """
    Helper function that returns a list of class indices without one class

    :param nb_classes: number of classes in total
    :param class_ind: the class index to be omitted
    :return: list of class indices without one class
    """
    other_classes_list = list(range(nb_classes))
    other_classes_list.remove(class_ind)
    return other_classes_list
05b88e49827523508b14400aa83aa83dd48f2b2e
8,700
from typing import Dict from io import StringIO def interpolate(s: str, registers: Dict) -> str: """ Interpolates variables in a string with values from a supplied dictionary of registers. The parser is very lax and will not interpolate variables that don't exist, as users may not be intending to interpolate a variable when they type the hash character. The hash symbols can be escaped with a caret (^), but if a caret is placed before a character that doesn't need escaping (another caret or a hash character), then the escape character acts as a normal character (nothing is removed or replaced). Parameters ---------- s : str The string to interpolate registers : Dict[str, Message] A mapping of variable names to messages Returns ------- str A new string with interpolated values """ escaping = False variable_mode = False buffer = StringIO() variable_buffer = StringIO() for i in range(0, len(s)): if escaping: if s[i] != "#": buffer.write("^") buffer.write(s[i]) escaping = False elif variable_mode: if s[i] == "#": name = variable_buffer.getvalue() if name in registers: buffer.write(registers[name].content) else: buffer.write("#") buffer.write(name) buffer.write("#") variable_buffer = StringIO() variable_mode = False elif s[i] != " ": variable_buffer.write(s[i]) else: # invalid variable name buffer.write("#") buffer.write(variable_buffer.getvalue()) buffer.write(s[i]) variable_buffer = StringIO() variable_mode = False elif s[i] == "^": escaping = True elif s[i] == "#": variable_mode = True else: buffer.write(s[i]) if escaping: buffer.write("^") if len(variable_buffer.getvalue()): buffer.write("#") buffer.write(variable_buffer.getvalue()) return buffer.getvalue()
a877e455771e09bcca85455ffefe87c4622255f2
8,702
def get_fully_qualified_class_name(cls): """Returns fully dot-qualified path of a class, e.g. `ludwig.models.trainer.TrainerConfig` given `TrainerConfig`.""" return ".".join([cls.__module__, cls.__name__])
dc3cbbb8be4503b562a381aa45842399a623971e
8,707
def as_chunks(l, num): """ :param list l: :param int num: Size of split :return: Split list :rtype: list """ chunks = [] for i in range(0, len(l), num): chunks.append(l[i:i + num]) return chunks
6bf6a2efed8e4830447319dd1624e70463faaf41
8,709
def _name_to_agent_class(name: str):
    """
    Convert agent name to class.

    This adds "Agent" to the end of the name and uppercases the first letter
    and the first letter appearing after each underscore (underscores are
    removed).

    :param name: name of agent, e.g. local_human
    :return: class of agent, e.g. LocalHumanAgent.
    """
    words = name.split('_')
    class_name = ''
    for w in words:
        # capitalize the first letter
        class_name += w[0].upper() + w[1:]
    # add Agent to the end of the name
    class_name += 'Agent'
    return class_name
6ac0dbf4fb8ab90e592b85216be6d9c109a1310c
8,711
def bin_search_recursive(array, what_to_find, left=0, right=None): """ Finds element in a sorted array using recursion. :param list array: A sorted list of values. :param what_to_find: An item to find. :returns: Index of the searchable item or -1 if not found. """ right = right if right is not None else len(array) - 1 if left > right: return -1 # Searchable not found middle_pos = (left + right) // 2 if array[middle_pos] == what_to_find: return middle_pos if what_to_find < array[middle_pos]: return bin_search_recursive(array, what_to_find, left=left, right=middle_pos - 1) return bin_search_recursive(array, what_to_find, left=middle_pos + 1, right=right)
83ff4dbcd9cab179c5e83f73d5fdc7c5a6bca4d4
8,712
from typing import List def extensions_to_glob_patterns(extensions: List) -> List[str]: """Generate a list of glob patterns from a list of extensions. """ patterns: List[str] = [] for ext in extensions: pattern = ext.replace(".", "*.") patterns.append(pattern) return patterns
a04ed356bfa5db7c0210b86dff832d32bfef6dbf
8,713
import re


def get_operation_id_groups(expression):
    """Takes an operator expression from an .mmcif transformation dict, and
    works out what transformation IDs it is referring to. For example,
    (1,2,3) becomes [[1, 2, 3]], (1-3)(8-11,17) becomes
    [[1, 2, 3], [8, 9, 10, 11, 17]], and so on.

    :param str expression: The expression to parse.
    :rtype: ``list``"""
    if expression[0] != "(":
        expression = "({})".format(expression)
    groups = re.findall(r"\((.+?)\)", expression)
    group_ids = []
    for group in groups:
        ids = []
        elements = group.split(",")
        for element in elements:
            if "-" in element:
                bounds = [int(x) for x in element.split("-")]
                ids += [str(n) for n in list(range(bounds[0], bounds[1] + 1))]
            else:
                ids.append(element)
        group_ids.append(ids)
    return group_ids
8ec6fdca5209de1d658a2ae938fc840e9d1b0c23
8,714
import textwrap def text_word_wrap(text, width): """ Word-wrap a string to width `width`. """ return textwrap.wrap(text, width)
7dbaae3a61be37a3208dd9c9b6a541aadb325e3e
8,719
def get_cost(ss, a, dist_mat, C, n): """Determines the cost of an airline route network from a sampleset. Args: - ss: Sampleset dictionary. One solution returned from the hybrid solver. - a: Float in [0.0, 1.0]. Discount allowed for hub-hub legs. - dist_mat: Numpy matrix providing distance between cities i and j. - C: Numpy matrix. Represents airline leg cost. - n: Int. Number of cities in play. Returns: - cost: Cost of provided route network. """ cost = 0 for i in range(n): for j in range(i+1, n): cost += dist_mat[i][j]*(C[i][ss[i]] + C[j][ss[j]] + a*C[ss[i]][ss[j]]) return cost
d8e810133a08213d0815a551c1fd7eaaa650532f
8,722
def _convert_to_float(score): """ Convert a string ('score') to float. If the string is empty, return None. If the string is float-like, return its float. """ if len(score) == 0: return None else: return float(score)
48cbc42310595d5a6ae8d8296feb7d81e61d52dc
8,724
def determine_suitable_lda(displace_elev, margin): """Given a displacement elevation and safety marign, determine the minimum acceptable LDA settings.""" return displace_elev * margin
6db43c125ee98bfefe91f9d44c601dcacdf7aff3
8,727
import torch def camera_rays(camK, W=None, H=None, c2w=None, graphics_coordinate=True, center=False): """shoot viewing rays from camera parameters. Args: camK: Tensor of shape `[3,3]`, the intrinsic matrix. W: Integer, if set None, then `W` is calculated as `2*cx`. H: Integer, if set None, then `H` is calculated as `2*cy`. c2w: Tensor of shape `[4,4]` or `[3,4]` camera view matrix. If `None`, c2w is set as `[I,0]` graphics_coordinate: bool. Where or not use graphics coordinate (pointing negative z into screen). Default: `True`. center: bool. Where or set 0.5 offset for pixels Default: `False`. Returns: rays_o: tensor of shape `[W,H,3]` origins of the rays. rays_d: tensor of shape `[W,H,3]` directions of the rays. """ if c2w is None: c2w = torch.hstack((torch.eye(3), torch.zeros((3, 1)))) if W is None: W = camK[0, 2]*2 if H is None: H = camK[1, 2]*2 W = int(W) H = int(H) invK = torch.inverse(camK) u, v = torch.meshgrid(torch.linspace(0, W-1, W), torch.linspace(0, H-1, H)) u = u.t() v = v.t() if center: u = u + 0.5 v = v + 0.5 dirs = torch.stack([u, v, torch.ones_like(u)], dim=-1) dirs = torch.matmul(dirs, invK.T) # use graphics coordinate. negtive z pointing into screen. if graphics_coordinate: dirs[..., 1] *= -1 dirs[..., 2] *= -1 rays_d = torch.matmul(dirs, c2w[:3, :3].T) rays_o = c2w[:3, -1].expand(rays_d.shape) return torch.cat([rays_o, rays_d], dim=-1)
c572283305dccc243de1bd956d11b7fd2ff42726
8,737
def get_rpm_properties(input_file: str):
    """
    Summary:
        processes the structured name of the rpm file to get the arch,
        release, version, and name

    Parameters:
        input_file (str): the file

    Returns:
        dictionary containing arch, release, version, and name
    """
    # get properties from rpm_file name
    arch = input_file.rsplit('.', 2)[1]
    release = input_file.rsplit('.', 2)[0].rsplit('-', 1)[1]
    version = input_file.rsplit('.', 2)[0].rsplit('-', 1)[0].rsplit('-', 1)[1]
    name = input_file.rsplit('.', 2)[0].rsplit('-', 1)[0].rsplit('-', 1)[0]

    # put into dictionary
    output = {
        'arch': arch,
        'release': release,
        'version': version,
        'name': name
    }

    return output
291171913e80ede385a464c525fc44e87aeaf41b
8,739
def withRequest(f): """ Decorator to cause the request to be passed as the first argument to the method. If an I{xmlrpc_} method is wrapped with C{withRequest}, the request object is passed as the first argument to that method. For example:: @withRequest def xmlrpc_echo(self, request, s): return s @since: 10.2 """ f.withRequest = True return f
f7fd8da601300aef722eb6706d111a54383648c0
8,740
import torch


def calc_init_centroid(images, num_spixels_width, num_spixels_height):
    """
    calculate initial superpixels

    Args:
        images: torch.Tensor
            A Tensor of shape (B, C, H, W)
        num_spixels_width: int
            A number of superpixels in each column
        num_spixels_height: int
            A number of superpixels in each row

    Return:
        centroids: torch.Tensor
            A Tensor of shape (B, C, H * W)
        init_label_map: torch.Tensor
            A Tensor of shape (B, H * W)
    """
    batchsize, channels, height, width = images.shape
    device = images.device

    # adaptive average pooling
    centroids = torch.nn.functional.adaptive_avg_pool2d(images, (num_spixels_height, num_spixels_width))

    with torch.no_grad():
        num_spixels = num_spixels_width * num_spixels_height  # total number of superpixels
        # e.g. with 20 superpixels, label them in order from top to bottom, left to right
        labels = torch.arange(num_spixels, device=device).reshape(1, 1, *centroids.shape[-2:]).type_as(centroids)
        # mark each pixel with its initial superpixel label, i.e. tile the labels over rectangular blocks
        init_label_map = torch.nn.functional.interpolate(labels, size=(height, width), mode="nearest")
        # repeat along the batch dimension
        init_label_map = init_label_map.repeat(batchsize, 1, 1, 1)

    # flatten each image in the batch (this is the pixel-to-superpixel map, so the channel dimension is 1)
    init_label_map = init_label_map.reshape(batchsize, -1)
    # flatten each channel of each image in the batch
    centroids = centroids.reshape(batchsize, channels, -1)

    return centroids, init_label_map
cfa87a57d4cb6b194da4ae6316433adefb8cfbe1
8,742
def deepmerge(a, b): """ Merge dict structures and return the result. >>> a = {'first': {'all_rows': {'pass': 'dog', 'number': '1'}}} >>> b = {'first': {'all_rows': {'fail': 'cat', 'number': '5'}}} >>> import pprint; pprint.pprint(deepmerge(a, b)) {'first': {'all_rows': {'fail': 'cat', 'number': '5', 'pass': 'dog'}}} """ if isinstance(a, dict) and isinstance(b, dict): return dict([(k, deepmerge(a.get(k), b.get(k))) for k in set(a.keys()).union(b.keys())]) elif b is None: return a else: return b
5d6f27d6bff8643e37398b4c3c31a0340585b88d
8,745
def _ggm_prob_wait_whitt_z(ca2, cs2):
    """
    Equation 3.8 on p139 of Whitt (1993). Used in approximation for
    P(Wq > 0) in GI/G/c/inf queue.

    See Whitt, Ward. "Approximations for the GI/G/m queue"
    Production and Operations Management 2, 2 (Spring 1993): 114-161.

    Parameters
    ----------
    ca2 : float
        squared coefficient of variation for inter-arrival time distribution
    cs2 : float
        squared coefficient of variation for service time distribution

    Returns
    -------
    float
        approximation for intermediate term z (see Eq 3.6)
    """
    z = (ca2 + cs2) / (1.0 + cs2)
    return z
91cbf519541411dec095b710e7449f3a183c20d3
8,751
def parse_method(name): """Parse hyperparameters from string name to make legend label. Parameters ---------- name : str Name of method Returns ------- string : str Formatted string """ string = r"" if name.split('es_')[1][0] == '1': string += r'ES' if name.split('vm_')[1][0] == '1': if len(string) > 0: string += r', VM' else: string += r'VM' alpha = name.split('alpha_')[1].split('_')[0] if len(string) > 0: string += r', $\alpha=%s$' % alpha else: string += r'$\alpha=%s$' % alpha return string
eb6824a6ab7ca126c924fa7acca2725f8b06379e
8,752
def _get_file_contents(path): """ Gets the contents of a specified file, ensuring that the file is properly closed when the function exits """ with open(path, "r") as f: return f.read()
cfe84e52e2ac48d3f7d9d20fd1c85c71a222ef95
8,754
def preprocess_int(i, **options): """Convert a string to an integer.""" return str(i)
7d40ef9e0547aaeb635068c11d91e84531e0ae4a
8,760
def convert_string_to_list(df, col, new_col): """Convert column from string to list format.""" fxn = lambda arr_string: [int(item) for item in str(arr_string).split(" ")] mask = ~(df[col].isnull()) df[new_col] = df[col] df.loc[mask, new_col] = df[mask][col].map(fxn) return df
cc0e04fbe6b5523647ceb954fc4c17e78f2a8554
8,762
from pathlib import Path def raw_directory(request): """Gets 'raw' directory with test datafiles""" return Path(request.config.rootdir) / "tests/testdata/2020/6/raw"
efbdf7c5966578e2180ea4f9db0580420706ec23
8,763
def raw_formatter(subtitles): """ Serialize a list of subtitles as a newline-delimited string. """ return ' '.join(text for (_rng, text) in subtitles)
51109a9b29c30257e9e8fa50abf1e718374a521f
8,766
def get_all_matching(cls, column_name, values): """Get all the instances of ``cls`` where the column called ``column_name`` matches one of the ``values`` provided. Setup:: >>> from mock import Mock >>> mock_cls = Mock() >>> mock_cls.query.filter.return_value.all.return_value = ['result'] Queries and returns the results:: >>> get_all_matching(mock_cls, 'a', [1,2,3]) ['result'] >>> mock_cls.a.in_.assert_called_with([1,2,3]) >>> mock_cls.query.filter.assert_called_with(mock_cls.a.in_.return_value) """ column = getattr(cls, column_name) query = cls.query.filter(column.in_(values)) return query.all()
d74ddf983e33f63dfcaf1f335c91b35faa00a651
8,769
def inc(n): """Increment an integer.""" return -~n
694ba6320b842985f87a36e452eb0f30e39442b4
8,775
def is_empty_tensor(t): """Returns whether t is an empty tensor.""" return len(t.size()) == 0
f0caf9a7b21c77a01dc088314f2d8fbbe49cf1f3
8,786
def search(variable: str, target: str) -> str:
    """Search service using mwapi on wikidata

    Args:
        variable (str): variable name (?film, ?director...)
        target (str): value to search for

    Returns:
        str: service query
    """
    if variable is None or target is None:
        return ""
    return f"""
        SERVICE wikibase:mwapi {{
            bd:serviceParam wikibase:api "EntitySearch" .
            bd:serviceParam wikibase:endpoint "www.wikidata.org" .
            bd:serviceParam mwapi:search "{target}" .
            bd:serviceParam mwapi:language "en" .
            {variable} wikibase:apiOutputItem mwapi:item .
        }}
    """
3959a6c7d93e5f61f237a019ae941702df35eb31
8,789
def inner_product(x, y): """Inner product.""" return x.dot(y)
aa56c71199863b5b8764ce8e96375c8cc61378d4
8,794
def to_millis(seconds): """ Converts the time parameter in seconds to milliseconds. If the given time is negative, returns the original value. :param seconds: (Number), the given time in seconds. :return: (int), result of the conversation in milliseconds. """ if seconds >= 0: return int(seconds * 1000) return seconds
818409afa643dbb8de73c35348a08508227b75a3
8,795
import re


def cassini_time(time):
    """Parse Cassini time.

    Parameters
    ----------
    time: str, int or float
        Cassini time.

    Returns
    -------
    float
        Parsed Cassini time as float.

    Raises
    ------
    ValueError
        If the input time pattern is invalid.

    Examples
    --------
    >>> cassini_time('v1487096932_1')
    1487096932.0

    >>> cassini_time(1483230358.172)
    1483230358.172
    """
    cassini_time = re.findall(r'(\d{10})(\.\d+)?', str(time))

    if not cassini_time:
        raise ValueError(f'Cassini time invalid: `{time}`')

    return float(''.join(cassini_time[0]))
bc14c2803e04ed690fac75eb32d72b27a803f1ad
8,806
def pods_by_uid(pods): """Construct a dict of pods, keyed by pod uid""" return {pod["metadata"]["uid"]: pod for pod in pods}
44b4167c561e494700e56a4967f731e0bef48aab
8,808
def _tags_conform_to_filter(tags, filter): """Mirrors Bazel tag filtering for test_suites. This makes sure that the target has all of the required tags and none of the excluded tags before we include them within a test_suite. For more information on filtering inside Bazel, see com.google.devtools.build.lib.packages.TestTargetUtils.java. Args: tags: all of the tags for the test target filter: a struct containing excluded_tags and required_tags Returns: True if this target passes the filter and False otherwise. """ # None of the excluded tags can be present. for exclude in filter.excluded_tags: if exclude in tags: return False # All of the required tags must be present. for required in filter.required_tags: if required not in tags: return False # All filters have been satisfied. return True
1db9528e11d1b513690af14f1d8453f8b0682d34
8,809
import torch def kronecker(mat1, mat2): """ kronecker product between 2 2D tensors :param mat1: 2d torch.Tensor :param mat2: 2d torch.Tensor :return: kronecker product of mat1 and mat2 """ s1 = mat1.size() s2 = mat2.size() return torch.ger(mat1.view(-1), mat2.view(-1)).reshape(*(s1 + s2)).permute([0, 2, 1, 3]).reshape(s1[0] * s2[0], s1[1] * s2[1])
930ac9827b92848656b6579c173b2d7675b7e657
8,813
def weighted_score(raw_earned, raw_possible, weight):
    """
    Returns a tuple that represents the weighted (earned, possible) score.

    If weight is None or raw_possible is 0, returns the original values.

    When weight is used, it defines the weighted_possible. This allows
    course authors to specify the exact maximum value for a problem when
    they provide a weight.
    """
    assert raw_possible is not None
    cannot_compute_with_weight = weight is None or raw_possible == 0
    if cannot_compute_with_weight:
        return raw_earned, raw_possible
    else:
        return float(raw_earned) * weight / raw_possible, float(weight)
98ec27ebe606586811945650c18772801edd80a0
8,817
def harmonic_epmi_score(pdict, wlist1, wlist2): """ Calculate harmonic mean of exponentiated PMI over all word pairs in two word lists, given pre-computed PMI dictionary - If harmonic ePMI is undefined, return -inf """ total_recip_epmi = None # Number of pairs for which PMI exists N = 0 for word1 in wlist1: for word2 in wlist2: # Enforce alphabetical order in pair pair = tuple(sorted([word1, word2])) wi, wj = pair if wi in pdict and wj in pdict[wi]: if total_recip_epmi is None: total_recip_epmi = 0 total_recip_epmi += 1/(2**pdict[wi][wj]) N += 1 if total_recip_epmi is not None: return N/total_recip_epmi else: return float("-inf")
5aec36df72e22fecbb1dfdcbc6ec840944a40d8d
8,820
def optional_apply(f, value): """ If `value` is not None, return `f(value)`, otherwise return None. >>> optional_apply(int, None) is None True >>> optional_apply(int, '123') 123 Args: f: The function to apply on `value`. value: The value, maybe None. """ if value is not None: return f(value)
dfa5b6793d7226370a27d6a638c0a5bc975f78d4
8,822
def sort_data(data, cols): """Sort `data` rows and order columns""" return data.sort_values(cols)[cols + ['value']].reset_index(drop=True)
33acbfd9be36d187120564f1792147b644b6c394
8,825