content: string, 39–14.9k chars
sha1: string, 40 chars
id: int64, 0–710k
import builtins


def no_matplotlib(monkeypatch):
    """Mock an import error for matplotlib."""
    import_orig = builtins.__import__

    def mocked_import(name, globals=None, locals=None, fromlist=(), level=0):
        """Raise ImportError whenever matplotlib.pyplot is imported."""
        if name == 'matplotlib.pyplot':
            raise ImportError("This is a mocked import error")
        return import_orig(name, globals, locals, fromlist, level)

    monkeypatch.setattr(builtins, '__import__', mocked_import)
681ba8c0e70387e46ad7ed42ffb11ce8aa7f23bc
703,154
import tqdm


def create_tqdm_reader(reader, max_reads=None):
    """Wrap an iterable in a tqdm progress bar.

    Args:
        reader: The iterable to wrap.
        max_reads: Max number of items, if known in advance.

    Returns:
        The wrapped iterable.
    """
    return tqdm.tqdm(reader, total=max_reads)
29bd93b85ace167f5586ac275510e82f7bbe8223
703,155
import math


def get_sequence_of_considered_visits(max_num_considered_actions, num_simulations):
    """Returns a sequence of visit counts considered by Sequential Halving.

    Sequential Halving is a "pure exploration" algorithm for bandits,
    introduced in "Almost Optimal Exploration in Multi-Armed Bandits":
    http://proceedings.mlr.press/v28/karnin13.pdf

    The visit counts allow Sequential Halving to be implemented by selecting
    the best action from the actions with the currently considered visit
    count.

    Args:
        max_num_considered_actions: The maximum number of considered actions.
            May be smaller than the number of actions.
        num_simulations: The total simulation budget.

    Returns:
        A tuple with visit counts. Length `num_simulations`.
    """
    if max_num_considered_actions <= 1:
        return tuple(range(num_simulations))
    log2max = int(math.ceil(math.log2(max_num_considered_actions)))
    sequence = []
    visits = [0] * max_num_considered_actions
    num_considered = max_num_considered_actions
    while len(sequence) < num_simulations:
        num_extra_visits = max(1, int(num_simulations / (log2max * num_considered)))
        for _ in range(num_extra_visits):
            sequence.extend(visits[:num_considered])
            for i in range(num_considered):
                visits[i] += 1
        # Halve the number of considered actions.
        num_considered = max(2, num_considered // 2)
    return tuple(sequence[:num_simulations])
f0081ae5bfe25d6a3eaad9f032cfb88a403fbb45
703,157
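A short usage sketch for the Sequential Halving sequence above; the expected tuple is hand-traced from the function as written:

# 4 candidate actions, a budget of 10 simulations: one sweep over all
# 4 actions, then repeated sweeps over the surviving top 2.
assert get_sequence_of_considered_visits(4, 10) == (0, 0, 0, 0, 1, 1, 2, 2, 3, 3)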
def _validate_int(
    setting, value, option_parser, config_parser=None, config_section=None
) -> int:
    """Validate an integer setting."""
    return int(value)
9036b1b043bd2463cad4f26780d47e80aa404f73
703,161
def sub(arg1, arg2):
    """Function that subtracts two arguments."""
    return arg1 - arg2
fb3694bc0827f62befe67cb58c1edb9de1adb808
703,165
def query_for_data(driver):
    """Grab all relevant data on a jobs page.

    Return:
    ------
        job_titles: list
        job_locations: list
        posting_companies: list
        dates: list
        hrefs: list
    """
    job_titles = driver.find_elements_by_xpath(
        "//span[@itemprop='title']")
    job_locations = driver.find_elements_by_xpath(
        "//div[@itemprop='jobLocation']")
    posting_companies = driver.find_elements_by_xpath(
        "//span[@itemprop='name']")
    dates = driver.find_elements_by_xpath(
        "//time[@itemprop='datePosted']")
    hrefs = driver.find_elements_by_xpath("//div//article//div//h2//a")

    return job_titles, job_locations, posting_companies, dates, hrefs
d3a44ec2e66f9c8ba09dac45dc253c2dd67303c4
703,166
import typing


def split_line(line: str) -> typing.Tuple[str, str]:
    """Separate the raw line string into two strings: (1) the command and
    (2) the argument(s) string.

    :param line: the raw input line
    :return: (command, arguments) tuple
    """
    index = line.find(' ')
    if index == -1:
        return line.lower(), ''
    return line[:index].lower(), line[index:].strip()
964877ebe0e63161f449a1d60542fbcab451de28
703,167
from dateutil.parser import parse


def is_datetime_string(string: str) -> bool:
    """Check if the string is date-like.

    Parameters
    ----------
    string : str

    Returns
    -------
    is_date: bool
    """
    try:
        parse(string)
        return True
    except ValueError:
        return False
ec26eab5d25c2b130efbf32304b2b79f8292a6e1
703,174
def _fspath(path):
    """Return the path representation of a path-like object.

    If str or bytes is passed in, it is returned unchanged. Otherwise the
    os.PathLike interface is used to get the path representation. If the
    path representation is not str or bytes, TypeError is raised. If the
    provided path is not str, bytes, or os.PathLike, TypeError is raised.
    """
    if isinstance(path, (str, bytes)):
        return path

    # Work from the object's type to match method resolution of other magic
    # methods.
    path_type = type(path)
    try:
        path_repr = path_type.__fspath__(path)
    except AttributeError:
        if hasattr(path_type, '__fspath__'):
            raise
        else:
            raise TypeError("expected str, bytes or os.PathLike object, "
                            "not " + path_type.__name__)
    if isinstance(path_repr, (str, bytes)):
        return path_repr
    else:
        raise TypeError("expected {}.__fspath__() to return str or bytes, "
                        "not {}".format(path_type.__name__,
                                        type(path_repr).__name__))
dfffc592e8e03095c91653327d673e424480dfcf
703,180
def hcolor(data, thresholds):
    """Multicolor a graph according to thresholds.

    :param data: the data
    :type data: list of tuples (info, value)
    :param thresholds: dict of thresholds, format {<threshold>: <color>,}
    :type thresholds: dict
    :return: the colored graph
    :rtype: list of arrays
    """
    ret = []
    for info, value in data:
        newval = []
        minover = None
        maxt = 0
        for t in thresholds:
            if maxt < t:
                maxt = t
            if value > t:
                newval.append((t, thresholds[t]))
            else:
                if minover is None or minover > t:
                    minover = t
        if minover is None:
            minover = maxt
        newval.append((value, thresholds[minover]))
        ret.append((info, newval))
    return ret
00b18c204ea97a7f5d919122b08488c42a25e9da
703,181
def euler_problem_6(n=100):
    """
    The sum of the squares of the first ten natural numbers is,
    1^2 + 2^2 + ... + 10^2 = 385

    The square of the sum of the first ten natural numbers is,
    (1 + 2 + ... + 10)^2 = 55^2 = 3025

    Hence the difference between the sum of the squares of the first ten
    natural numbers and the square of the sum is 3025 - 385 = 2640.

    Find the difference between the sum of the squares of the first one
    hundred natural numbers and the square of the sum.
    """
    # looks like brute force gives you O(n) or O(n logn), which is not bad...
    # but we can do better with mathematical insight.
    def sum_of_integer_squares(k):
        """Use the formula 1^2 + 2^2 + ... + n^2 = (n * (n+1) * (2n+1)) / 6."""
        return (k * (k + 1) * (2 * k + 1)) / 6

    def square_of_integer_sums(k):
        """Use the formula 1 + 2 + ... + n = n (n+1) / 2."""
        return (k * (k + 1) / 2) ** 2

    # O(logn) basic operations
    sqsum = square_of_integer_sums(n)
    sumsq = sum_of_integer_squares(n)
    return int(sqsum - sumsq)
550d29deea17b3047bc869a134837d4f5c1baf95
703,182
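A worked check of euler_problem_6 against the docstring's n = 10 example, plus the default n = 100 case:

assert euler_problem_6(10) == 3025 - 385 == 2640
# sum 1..100 is 5050, so the square of sums is 25502500;
# the sum of squares is 100 * 101 * 201 / 6 = 338350.
assert euler_problem_6(100) == 25502500 - 338350 == 25164150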
def get_num_classes(dataset_name: str) -> int:
    """
    Get the number of supervised classes given a dataset name.

    :param dataset_name: dataset name
    :return: number of supervised classes.
    """
    if dataset_name == "cifar10":
        return 10
    elif "cifar100" in dataset_name:
        return 100
    else:
        raise ValueError("Supported datasets are only original CIFAR10/100")
dc699aeaef87b1763c9986cda596b920156e2478
703,187
def get_bsse_section(natoms_a, natoms_b, mult_a=1, mult_b=1, charge_a=0, charge_b=0):
    """Get the &FORCE_EVAL/&BSSE section."""
    bsse_section = {
        'FORCE_EVAL': {
            'BSSE': {
                'FRAGMENT': [{
                    'LIST': '1..{}'.format(natoms_a)
                }, {
                    'LIST': '{}..{}'.format(natoms_a + 1, natoms_a + natoms_b)
                }],
                'CONFIGURATION': [
                    {  # A fragment with basis set A
                        'MULTIPLICITY': mult_a,
                        'CHARGE': charge_a,
                        'GLB_CONF': '1 0',
                        'SUB_CONF': '1 0',
                    },
                    {  # B fragment with basis set B
                        'MULTIPLICITY': mult_b,
                        'CHARGE': charge_b,
                        'GLB_CONF': '0 1',
                        'SUB_CONF': '0 1',
                    },
                    {  # A fragment with basis set A+B
                        'MULTIPLICITY': mult_a,
                        'CHARGE': charge_a,
                        'GLB_CONF': '1 1',
                        'SUB_CONF': '1 0',
                    },
                    {  # B fragment with basis set A+B
                        'MULTIPLICITY': mult_b,
                        'CHARGE': charge_b,
                        'GLB_CONF': '1 1',
                        'SUB_CONF': '0 1',
                    },
                    {  # A+B fragments with basis set A+B
                        'MULTIPLICITY': mult_a + mult_b - 1,
                        'CHARGE': charge_a + charge_b,
                        'GLB_CONF': '1 1',
                        'SUB_CONF': '1 1',
                    }
                ]
            }
        }
    }
    return bsse_section
61c9398ed35eaaf2212c2c1a66e2cf43b9bbe029
703,189
import re


def load_placement(placement_file):
    """
    Loads VPR placement file. Returns a tuple with the grid size and a dict
    indexed by locations that contains top-level block names.
    """
    RE_PLACEMENT = re.compile(
        r"^\s*(?P<net>\S+)\s+(?P<x>[0-9]+)\s+(?P<y>[0-9]+)\s+(?P<z>[0-9]+)"
    )
    RE_GRID_SIZE = re.compile(
        r"Array size:\s+(?P<x>[0-9]+)\s+x\s+(?P<y>[0-9]+)\s+logic blocks"
    )

    # Load the file
    with open(placement_file, "r") as fp:
        lines = fp.readlines()

    # Parse
    grid_size = None
    placement = {}
    for line in lines:
        line = line.strip()
        if line.startswith("#"):
            continue

        # Placement
        match = RE_PLACEMENT.match(line)
        if match is not None:
            loc = (int(match.group("x")), int(match.group("y")))
            placement[loc] = match.group("net")

        # Grid size
        match = RE_GRID_SIZE.match(line)
        if match is not None:
            grid_size = (int(match.group("x")), int(match.group("y")))

    return grid_size, placement
72b534b5c8597f4a42d02e041c69a8fc3c92e8f7
703,190
def license_path(licenses):
    """Get license path."""
    # return license if there is exactly one license
    return licenses[0] if len(licenses) == 1 else None
b8194e099c4516627edab6c4538e5dfcdc6600a3
703,192
from datetime import datetime


def complete_month(year, month):
    """
    Return a string with the month number padded with zero if the month has
    only one digit. It is also necessary to provide a year.

    :param year: the year
    :param month: the month number
    :return: Month number padded with zero.
    :rtype: str
    """
    return datetime(year, month, 1).strftime("%m")
03915be101c0f418caa78ae6bc423273ad3af24c
703,194
def scrub_email(address):
    """
    Remove the local-part from an email address for the sake of anonymity.

    :param address: <str>
    :return: <str>
    """
    if '@' in address:
        domain = address.split('@')[1]
        return 'user@{}'.format(domain)
    else:
        return address
90b54f3a06f3fe50b354138113c27e980c01c59c
703,197
def symm_area(col, n):
    """
    Returns n + (n - 1) + ... + (n - col + 1), i.e., the number of matrix
    elements below and including the diagonal and from column 0 to column
    `col`.
    """
    return col * (2 * n - col + 1) // 2
e5c7970ee2b612f4678952056be0358c968e06b3
703,200
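Two hand-checked values for symm_area: with n = 3, columns 0..1 contribute 3 + 2 elements, and all three columns give the full lower triangle:

assert symm_area(2, 3) == 3 + 2 == 5
assert symm_area(3, 3) == 3 + 2 + 1 == 6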
def find_device(p, tags):
    """Find an audio device to read input from."""
    device_index = None
    for i in range(p.get_device_count()):
        devinfo = p.get_device_info_by_index(i)
        print("Device %d: %s" % (i, devinfo["name"]))
        for keyword in tags:
            if keyword in devinfo["name"].lower():
                print("Found an input: device %d - %s" % (i, devinfo["name"]))
                device_index = i
                return device_index

    if device_index is None:
        print("No preferred sound input found; using default input device.")
    return device_index
41428447dc39be8fa06ede59816a7aca9d5bffee
703,202
def normalize_lons(l1, l2):
    """
    An international date line safe way of returning a range of longitudes.

    >>> normalize_lons(20, 30)  # no IDL within the range
    [(20, 30)]
    >>> normalize_lons(-17, +17)  # no IDL within the range
    [(-17, 17)]
    >>> normalize_lons(-178, +179)
    [(-180, -178), (179, 180)]
    >>> normalize_lons(178, -179)
    [(-180, -179), (178, 180)]
    >>> normalize_lons(179, -179)
    [(-180, -179), (179, 180)]
    >>> normalize_lons(177, -176)
    [(-180, -176), (177, 180)]
    """
    if l1 > l2:  # exchange lons
        l1, l2 = l2, l1
    delta = l2 - l1
    if l1 < 0 and l2 > 0 and delta > 180:
        return [(-180, l1), (l2, 180)]
    elif l1 > 0 and l2 > 180 and delta < 180:
        return [(l1, 180), (-180, l2 - 360)]
    elif l1 < -180 and l2 < 0 and delta < 180:
        return [(l1 + 360, 180), (l2, -180)]
    return [(l1, l2)]
c0d58aa7be8409d6337f0fa8b753f5ef30f531e5
703,203
from typing import Tuple


def _unmerge_points(
    board_points: Tuple[int, ...]
) -> Tuple[Tuple[int, ...], Tuple[int, ...]]:
    """Return player and opponent board positions starting from their
    respective ace points."""
    player: Tuple[int, ...] = tuple(
        map(
            lambda n: 0 if n < 0 else n,
            board_points,
        )
    )
    opponent: Tuple[int, ...] = tuple(
        map(
            lambda n: 0 if n > 0 else -n,
            board_points[::-1],
        )
    )
    return player, opponent
25965e023030266cc92e6b1456483204ad2c863a
703,206
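A small sketch of _unmerge_points: positive counts belong to the player, negative counts (read from the mirrored board) to the opponent:

player, opponent = _unmerge_points((2, 0, -1))
assert player == (2, 0, 0)
assert opponent == (1, 0, 0)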
import json


def from_json_string(my_str):
    """Returns an object (Python data structure) represented by a JSON string.

    Arguments:
        my_str (str) -- JSON string

    Returns:
        obj -- the decoded object
    """
    return json.loads(my_str)
cb013514b62456d6c628cf4ebea475b54851dfa4
703,208
def get_bse(da, da_peak_times):
    """
    Takes an xarray DataArray containing veg_index values and calculates the
    vegetation value base (bse) for each timeseries per-pixel. The base is
    calculated as the mean value of two minimum values; the min of the slope
    to the left of peak of season, and the min of the slope to the right of
    the peak of season. Users must provide an existing peak of season (pos)
    data array, which can either be the max of the timeseries, or the middle
    of season (mos) values.

    Parameters
    ----------
    da: xarray DataArray
        A two-dimensional or multi-dimensional DataArray containing an array
        of veg_index values.
    da_peak_times: xarray DataArray
        An xarray DataArray type with an x and y dimension (no time). Each
        pixel must be the time (day of year) value calculated at either peak
        of season (pos) or middle of season (mos) prior.

    Returns
    -------
    da_bse_values : xarray DataArray
        An xarray DataArray type with an x and y dimension (no time). Each
        pixel is the base (bse) veg_index value detected across the
        timeseries at each pixel.
    """
    # notify user
    print('Beginning calculation of base (bse) values (times not possible).')

    # get vos values (min val in each pixel timeseries)
    print('> Calculating base (bse) values.')

    # split timeseries into left and right slopes via provided peak/middle values
    slope_l = da.where(da['time.dayofyear'] <= da_peak_times).min('time')
    slope_r = da.where(da['time.dayofyear'] >= da_peak_times).min('time')

    # get per pixel mean of both left and right slope min values
    da_bse_values = (slope_l + slope_r) / 2

    # convert type
    da_bse_values = da_bse_values.astype('float32')

    # rename
    da_bse_values = da_bse_values.rename('bse_values')

    # notify user
    print('> Success!\n')
    return da_bse_values
3edaf6156bd9fdae15c3bf845eb3deb293489cfb
703,210
def get_dict_key_by_value(val, dic):
    """
    Return the first key of a dictionary that maps to the given value.

    Args:
        val (Any): Value of the key.
        dic (dict): Dictionary to be checked.

    Returns:
        Any, key of the given value.
    """
    for d_key, d_val in dic.items():
        if d_val == val:
            return d_key
    return None
d01522a61d7a0549ed54bfcb620da10857d67ae7
703,212
import json


def isJson(var=''):
    """Check json.

    >>> isJson(var='')
    False
    >>> isJson('')
    False
    >>> isJson('{}')
    True
    """
    result = True
    try:
        json.loads(var)
    except Exception:
        result = False
    return result
dc146fff1449df844ce0ac00607d77b5e2dc4370
703,213
def count_ontarget_samples(df, human_readable=False):
    """
    Function to count usable samples.

    Parameters
    ----------
    df: DataFrame

    human_readable: Boolean, optional
        default=False

    Returns
    -------
    ontarget_counts: DataFrame
        MultiIndexed if human_readable, otherwise "step" by "participant"
    """
    ontarget_counts = df[
        (df["ontarget"] == True)
    ][
        ["step", "target", "participant", "ontarget"]
    ].groupby(
        ["step", "target", "participant"]
    ).count().unstack(fill_value=0)
    if human_readable:
        return ontarget_counts
    ontarget_counts.set_index(
        ontarget_counts.index.droplevel("target"),
        inplace=True
    )
    ontarget_counts.columns = ontarget_counts.columns.droplevel()
    return ontarget_counts
3bb2532017089ab08ac53422baaa55a5b38ee4e3
703,214
import csv


def _read_file_to_dict(path):
    """
    Load the problems and the corresponding labels from the *.txt file.

    :param path: The full path to the file to read
    :return: The dictionary with the problem names as keys and the true
        class labels as values
    """
    label_dict = {}
    with open(path, 'r', encoding='utf-8-sig') as truth_file:
        truth = csv.reader(truth_file, delimiter=' ')
        for problem in truth:
            label_dict[problem[0]] = problem[1]
    return label_dict
83bd3b04afc995176dc4dfefb9863b9f1ba09888
703,216
def trace(fn):
    """Decorator that marks a function to be traced."""
    fn.should_trace = True
    return fn
598d81b2f4050b78cd42c835c5ce3bcc41c87541
703,218
def get_sm_tag_from_alignedseg(aln):
    """Get 'sm' tag from AlignedSegment."""
    try:
        return aln.get_tag('sm')
    except Exception:
        raise ValueError("Could not get 'sm' tag from {aln}".format(aln=aln))
ca23604f724f75bf4c399374547f1468c6c5df9b
703,222
def specific_gravity(temp, salinity, pressure):
    """Compute seawater specific gravity.

    sg = C(p) + β(p)S − α(T, p)T − γ(T, p)(35 − S)T

    units: p in "km", S in psu, T in °C

    C = 999.83 + 5.053p − .048p^2
    β = .808 − .0085p
    α = .0708(1 + .351p + .068(1 − .0683p)T)
    γ = .003(1 − .059p − .012(1 − .064p)T)

    For 30 ≤ S ≤ 40, −2 ≤ T ≤ 30, p ≤ 6 km: good to .16 kg/m3
    For 0 ≤ S ≤ 40, good to .3 kg/m3
    """
    C = 999.83 + 5.053 * pressure - 0.048 * pressure * pressure
    beta = 0.808 - 0.0085 * pressure
    alpha = 0.0708 * (1.0 + 0.351 * pressure + 0.068 * (1 - 0.0683 * pressure) * temp)
    gamma = 0.003 * (1.0 - 0.059 * pressure - 0.012 * (1.0 - 0.064 * pressure) * temp)
    sg = C + beta * salinity - alpha * temp - gamma * (35 - salinity) * temp
    return sg
37ee32d3842cd5f9645449b23feb4d8315536fe2
703,227
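A hand-computed spot check of specific_gravity at the surface (p = 0) with S = 35 psu and T = 0 °C, where the α and γ terms vanish:

sg = specific_gravity(temp=0, salinity=35, pressure=0)
assert abs(sg - (999.83 + 0.808 * 35)) < 1e-9  # about 1028.11 kg/m3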
def reverse_bits(num):
    """
    Reverses the bits representing the number.

    :param num: a number treated as being 32 bits long
    :return: the reversed number
    """
    result = 0
    for _ in range(32):
        result <<= 1
        result |= num & 1
        num >>= 1
    return result
262e589cf366065018a57cd6a6c443bdd8eb638e
703,235
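Two bit-level checks for reverse_bits, written out in hex:

assert reverse_bits(1) == 0x80000000        # lone LSB moves to bit 31
assert reverse_bits(0b1011) == 0xD0000000   # 1011 reversed is 1101, left-aligned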
import requests


def convert_using_api(from_currency, to_currency):
    """Convert from from_currency to to_currency by requesting the API."""
    convert_str = from_currency + '_' + to_currency
    options = {'compact': 'ultra', 'q': convert_str}
    api_url = 'https://free.currencyconverterapi.com/api/v5/convert'
    result = requests.get(api_url, params=options).json()
    return result[convert_str]
f261dcf6c97a8e5697e6b1005513b34f755f541f
703,239
import re


def remove_brackets(s):
    """Remove bracketed text [] () from a string."""
    return re.sub(r'[\(\[].*[\)\]]', '', s)
82685dfa66c2b1185a3e106f7289af5856c8e56e
703,244
def can_embed_image(repo, fname):
    """True if we can embed image file in HTML, False otherwise."""
    if not repo.info.embed_images:
        return False
    return ("." in fname) and (
        fname.split(".")[-1].lower() in ["jpg", "jpeg", "png", "gif"]
    )
40bfdd8c32ddd5f3d3bd2ae074494ba34e6fc1f1
703,247
def datetime_to_isoformat(dt):
    """Convert a Python datetime to an ISO 8601 representation.

    :param dt: A Python :class:~`datetime.datetime`.
    :return: A string representation of the date and time formatted as
        ISO 8601.
    """
    iso = dt.isoformat()
    if iso.endswith('+00:00'):
        return iso[:-6] + 'Z'
    else:
        return iso
508ce4ea3e0905aab0b16c6b28fa4e9304e18b08
703,249
def get_mode_from_params(params):
    """Returns the mode in which this script is running.

    Args:
        params: Params tuple, typically created by make_params or
            make_params_from_flags.

    Raises:
        ValueError: Unsupported params settings.
    """
    if params.forward_only and params.eval:
        raise ValueError('Only one of forward_only and eval parameters is true')
    if params.eval:
        return 'evaluation'
    if params.forward_only:
        return 'forward-only'
    return 'training'
35564684eef73adf821989dea27bfdc7de0443ae
703,258
def quote_logvalue(value):
    """Return a value formatted for use in a logfmt log entry.

    The input is quoted if it contains spaces or quotes; otherwise returned
    unchanged.
    """
    s = str(value)
    if " " in s or '"' in s:
        s = s.replace('"', '\\"')
        return f'"{s}"'
    return s
15dd0789b5a7ce4e18eece37ad0cac59d9cd2332
703,259
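Example logfmt quoting behaviour, per the rules in the docstring:

assert quote_logvalue(7) == "7"  # no space, no quote: unchanged
assert quote_logvalue("hello world") == '"hello world"'
assert quote_logvalue('say "hi"') == '"say \\"hi\\""'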
def flatten(master):
    """
    :param dict master: a multilevel dictionary
    :return: a flattened dictionary
    :rtype: dict

    Flattens a multilevel dictionary into a single-level one so that::

        {'foo':
            {'bar':
                {
                    'a': 1,
                    'b': True,
                    'c': 'hello',
                },
            },
        }

    would become::

        {'foo.bar.a': 1,
         'foo.bar.b': True,
         'foo.bar.c': 'hello',
        }

    You can mix and match both input (hierarchical) and output (dotted)
    formats in the input without problems - and if you call flatten more
    than once, it has no effect.
    """
    result = {}

    def add(value, *keys):
        if keys in result:
            raise ValueError('Duplicate key %s' % keys)
        result[keys] = value

    def recurse(value, *keys):
        if isinstance(value, dict):
            for k, v in value.items():
                recurse(v, k, *keys)
        else:
            key = '.'.join(reversed(keys))
            if key in result:
                raise ValueError('Duplicate key %s' % str(keys))
            result[key] = value

    recurse(master)
    return result
d31325219e43ee5c047c1a78589d94e2d7c62709
703,260
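A quick demonstration of flatten, including its advertised idempotence:

nested = {'foo': {'bar': {'a': 1, 'b': True}}}
flat = flatten(nested)
assert flat == {'foo.bar.a': 1, 'foo.bar.b': True}
assert flatten(flat) == flat  # calling it again has no effect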
def m21_midievent_to_event(midievent):
    """Convert a music21 MidiEvent to a tuple of MIDI bytes."""
    status = midievent.data + midievent.channel - 1
    return (status, midievent.pitch, midievent.velocity)
3950b4e6715ac4de2dbdcc2d87d5cf51387a220c
703,261
import copy


def merge_dict(d1, d2, overwrite=False):
    """Merge contents of d1 and d2 and return the merged dictionary.

    Note:

    * The dictionaries d1 and d2 are unaltered.
    * If `overwrite=False` (default), a `RuntimeError` will be raised when
      duplicate keys exist, else any existing keys in d1 are silently
      overwritten by d2.
    """
    # Note: May partially be replaced by a ChainMap as of python 3.3
    if overwrite is False:
        sd1 = set(d1.keys())
        sd2 = set(d2.keys())
        intersect = sd1.intersection(sd2)
        if len(intersect) > 0:
            msg = "Dictionaries to merge have overlapping keys: %s"
            raise RuntimeError(msg % intersect)

    td = copy.deepcopy(d1)
    td.update(d2)
    return td
d680dcc3039804c340fc488a488fae1d891a8d1b
703,263
def add_month(year, month, delta):
    """
    Helper function which adds `delta` months to the current `(year, month)`
    tuple and returns a new valid tuple `(year, month)`.
    """
    year, month = divmod(year * 12 + month + delta, 12)
    if month == 0:
        month = 12
        year = year - 1
    return year, month
8f509bba44bb27579b948c3b26e5f7c027be445c
703,269
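Two boundary checks for add_month, including the December wrap handled by the month == 0 branch:

assert add_month(2020, 11, 3) == (2021, 2)
assert add_month(2021, 1, -1) == (2020, 12)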
import json


def __get_job_obj(profile):
    """Return the 'job' object in the profile."""
    with open(profile, 'rt') as json_fobj:
        data = json.load(json_fobj)
    return data['jobs'][0]
2af6658f8a54987229dffe35efe37d2dace9f0bb
703,270
import torch


def model_fn(batch, model, criterion, device):
    """Forward a batch through the model."""
    mels, labels = batch
    mels = mels.to(device)
    labels = labels.to(device)

    outs = model(mels)
    loss = criterion(outs, labels)

    # Get the speaker id with highest probability.
    preds = outs.argmax(1)
    # Compute accuracy.
    accuracy = torch.mean((preds == labels).float())

    return loss, accuracy
2b9907e8f0fbec50b955082efb30d8cddc88b663
703,271
def get_context(canvas):
    """Get ``cairo.Context`` used to draw onto the canvas."""
    return canvas.renderer._get_context()
1d68e6eb742dff906b6e64c85d9609e34f508b77
703,274
def fahrenheit_from(celsius):
    """Convert Celsius to Fahrenheit degrees."""
    try:
        fahrenheit = float(celsius) * 9 / 5 + 32
        fahrenheit = round(fahrenheit, 3)  # Round to three decimal places
        return str(fahrenheit)
    except ValueError:
        return "invalid input"
e31ac8c62f108652fe3cc2ee1516a5b3a1a9e568
703,276
def big_endian_to_int(value):
    """
    Ethereum RLP Utils:
    Convert big-endian bytes to int

    :param value: big-endian value
    :return: int value
    """
    return int.from_bytes(value, byteorder="big")
57c9b05471e3558cae1a0d36dd3089b4d180faeb
703,277
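Example conversions for big_endian_to_int:

assert big_endian_to_int(b'\x01\x00') == 256
assert big_endian_to_int(b'\xff') == 255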
def _read_files(file_names):
    """
    Reads content from all specified file names.

    Args:
        file_names: set of file names

    Returns:
        list of lines from all files
    """
    all_lines = []
    for file_name in file_names:
        try:
            with open(file_name) as f:
                lines = f.read().splitlines()
                all_lines.extend(lines)
        except Exception:
            print("Skipping: {0}".format(file_name))
    return all_lines
98fabeeaeaf6dd142acaf7cf84c0ac25583bcdbf
703,278
def create_track_log(db, sessionID):
    """
    Instantiate the Track History Collection.

    :param db: The database object.
    :param sessionID: Current user's session ID.
    :return: The Track History Collection object.
    """
    collection_name = 'track_history_' + sessionID
    track_collection = db[collection_name]
    return track_collection
5fb72ae83e5a805ad8e35f62c9474e51170d3fb2
703,279
def detect_api_mismatch(ref_api, other_api, *, ignore=()):
    """Returns the set of items in ref_api not in other_api, except for a
    defined list of items to be ignored in this check.

    By default this skips private attributes beginning with '_' but
    includes all magic methods, i.e. those starting and ending in '__'.
    """
    missing_items = set(dir(ref_api)) - set(dir(other_api))
    if ignore:
        missing_items -= set(ignore)
    missing_items = set(m for m in missing_items
                        if not m.startswith('_') or m.endswith('__'))
    return missing_items
4353f3f6b825570e3193b57dbb08c3a26c7f59b9
703,282
from typing import Iterable

import torch


def fuse_single_qubit_operators(
    qubits: Iterable[int],
    operators: Iterable[torch.Tensor],
):
    """Multiply together gates acting on various single qubits.

    Suppose that we have a sequence of single-qubit gates that should act,
    one after the other, on designated qubits. If any qubit repeats, then we
    can matrix multiply the operators to reduce to a single matrix acting on
    each qubit. This function performs this operation and collects the
    "fused" operations in a simple dict.

    If the argument `qubits` is [2, 4, 2, 7] and `operators` is a list with
    length four as in

        A = operators[0]
        B = operators[1]
        C = operators[2]
        D = operators[3],

    then this function will return the dict

        {
            2: CA
            4: B
            7: D
        }

    where CA is the matrix multiplication torch.matmul(C, A).

    Args:
        qubits: Iterable of integers giving the qubits that gates act on.
        operators: Iterable of Tensor objects specifying single qubit
            operators. The size should be (*batch_dims, 2, 2).

    Returns:
        Dict mapping qubits to act on to a fused gate.
    """
    qubits_to_fused_ops = dict()
    for q, op in zip(qubits, operators):
        if q in qubits_to_fused_ops:
            qubits_to_fused_ops[q] = torch.matmul(op, qubits_to_fused_ops[q])
        else:
            qubits_to_fused_ops[q] = op
    return qubits_to_fused_ops
640541a3b0a79deb819bafad5734aca3e0dde23d
703,291
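A minimal sketch of fuse_single_qubit_operators with torch, assuming Hadamard and Pauli-X as the single-qubit gates; qubit 0 appears twice, so its two gates fuse into one matmul:

import torch

X = torch.tensor([[0., 1.], [1., 0.]])              # Pauli-X
H = torch.tensor([[1., 1.], [1., -1.]]) / 2 ** 0.5  # Hadamard

fused = fuse_single_qubit_operators([0, 1, 0], [H, X, X])
assert torch.allclose(fused[0], torch.matmul(X, H))  # H first, then X
assert torch.equal(fused[1], X)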
def tag_index(idx):
    """Return a mapping of tag names to index items."""
    tagidx = dict()
    for i in idx:
        for t in i.tags:
            if t not in tagidx:
                tagidx[t] = set()
            tagidx[t].add(i)
    return tagidx
df3ee2a934bfe3c814a9c1ded8d83314064f38bd
703,294
import math


def dist(p1, p2):
    """
    Determines the straight line distance between two points p1 and p2 in
    euclidean space.
    """
    d = math.sqrt(math.pow(p1[0] - p2[0], 2) + math.pow(p1[1] - p2[1], 2))
    return d
8a72ba5966452e7ac2e44f4c1f61d78071423ace
703,298
from typing import List
import glob


def get_l10n_files() -> List[str]:
    """Get the list of all translation-related files, including .pot, .po
    and .mo.

    Returns:
        List[str]: list of translation-related files (.pot, .po, .mo).
    """
    po_parser = 'asaloader/locale/*/LC_MESSAGES/asaloader.po'
    pot_file = 'asaloader/locale/asaloader.pot'

    po_files = glob.glob(po_parser)
    mo_files = [po_file.replace('.po', '.mo') for po_file in po_files]

    files = [pot_file] + po_files + mo_files
    return files
a783679261e7c9bd617728946d01a440b51cfb6a
703,302
def filter_value(entry, values=None):
    """
    Returns True if the entry should be filtered.

    Only take calls whose filter values are in the provided list; if None is
    provided, assume that filter_value must be PASS or blank '.'
    """
    if values is None:
        return len(entry.filter) != 0 and 'PASS' not in entry.filter
    return values.intersection(entry.filter)
57ee5ab67fa07cb8c1379d303e9d636718025f45
703,303
def make_song_title(artists: list, name: str, delim: str) -> str:
    """
    Generates a song title by joining the artist names and the song name.
    Artist names given in list format are joined using the given delimiter.
    """
    return f"{delim.join(artists)} - {name}"
341db19af517c09633a6ebe37726c79c020f4780
703,305
def authorizeView(user, identifier):
    """
    Returns True if a request to view identifier metadata is authorized.
    'user' is the requestor and should be an authenticated StoreUser object.
    'identifier' is the identifier in question; it should be a
    StoreIdentifier object.
    """
    # In EZID, essentially all identifier metadata is public.
    return not identifier.isAgentPid or user.isSuperuser
c831d74a229043a308226d6ae8078e5630507ded
703,306
def clean_keyword(kw):
    """Given a keyword parsed from the header of one of the tutorials, return
    a 'cleaned' keyword that can be used by the filtering machinery.

    - Title-cases each word
    - Removes . / and spaces
    """
    return kw.strip().title().replace('.', '').replace('/', '').replace(' ', '')
eb8ab983bf60f5d1ca2996dc9568ded252d00479
703,307
from typing import Union
from pathlib import Path
import hashlib


def compute_md5(path: Union[str, Path], chunk_size: int):
    """Return the MD5 checksum of a file, calculated chunk by chunk.

    Parameters
    ----------
    path : str or Path
        Path to the file to be read.
    chunk_size : int
        Chunk size used to calculate the MD5 checksum.
    """
    md5 = hashlib.md5()
    with open(str(path), "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            md5.update(chunk)
    return md5.hexdigest()
9e718630323b002307a54e7d3bbf936b6b94637a
703,308
def denormalize(images, min_, max_):
    """Scales images back to the [min_, max_] range."""
    return [((i + 1) / 2 * (max_ - min_)) + min_ for i in images]
3071e3c76754bda8ea2ce9607003cfd1b4f97e48
703,309
def version_is_locked(version):
    """
    Determine if a version is locked.
    """
    return getattr(version, "versionlock", None)
b2f76d89c2d0082ad13d5d896e5360d394c83ee1
703,310
def mockup_return(*args, **kwargs):
    """Mockup to replace regular functions for error injection."""
    return False
92172e58a11e48a09c8f181ac55aa717b5fbb94d
703,312
import re


def parse_life_105(file):
    """Parse a Life 1.05 file, returning a tuple:

    positions: list of (x, y) co-ordinates
    comments: all comments in the file, newline-joined into a single string
    """
    lines = file.split("\n")
    comments = []
    positions = []
    ox, oy = 0, 0
    x, y = ox, oy
    pattern_105 = r"\s*(\.|\*|o|O)+\s*\Z"
    for line in lines:
        line = line.strip()
        if line.startswith("#"):
            # comment
            if line[1] in "CcDd":
                comments.append(line[2:])
            # new block definition
            if line[1] in "Pp":
                coords = line[2:]
                try:
                    ox, oy = [int(p) for p in coords.split()]
                    x, y = ox, oy
                except ValueError:
                    pass
        else:
            # skip blanks
            if len(line) > 0 and re.match(pattern_105, line):
                # only fill in points which are active
                for char in line:
                    if char == "*" or char == "o" or char == "O":
                        positions.append((x, y))
                    x += 1
                y = y + 1
                x = ox

    comments = "\n".join(comments)
    return positions, comments
3faf204bb52f5c1dd5b350401afdaa2a021f80d4
703,317
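A glider in Life 1.05 form, as a sanity check for parse_life_105 (hand-traced against the parser above; note the leading space kept in the comment):

glider = "#C glider\n#P 0 0\n.*.\n..*\n***"
positions, comments = parse_life_105(glider)
assert positions == [(1, 0), (2, 1), (0, 2), (1, 2), (2, 2)]
assert comments == " glider"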
def index_of_spaces(text):
    """
    Given text, return all space indices.

    @param text is the string to analyze
    @returns a list of integers representing the indices
    """
    res = []
    for i in range(0, len(text)):
        if text[i] == ' ':
            res.append(i)
    return res
97b3618ffa54ee6d1b50c5bca3e196d3a6ae7f2a
703,321
def insert_at_midpoint(item, iterable):
    """
    Inserts an item at the index of the midpoint of a list.
    Returns that list with the item inserted.
    """
    midpoint = int(len(iterable) / 2)
    iterable.insert(midpoint, item)
    return iterable
9801e2f4cd1011914f15634898ed4d502edfee34
703,325
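insert_at_midpoint rounds the midpoint down, so for odd-length lists the item lands left of centre:

assert insert_at_midpoint('x', [1, 2, 3, 4]) == [1, 2, 'x', 3, 4]
assert insert_at_midpoint('x', [1, 2, 3]) == [1, 'x', 2, 3]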
def get_filenames_request(products, download_directory):
    """Get local file paths corresponding to a Copernicus request (must be
    already downloaded).

    :param products: (dict) Copernicus Hub query
    :param download_directory: (Path) folder for downloaded products
    :return: (list) list of local paths for each product in the request
    """
    # list of id's per requested products
    ids_request = list(products.keys())
    # list of downloaded filenames urls
    filenames = [
        download_directory / f"{products[file_id]['title']}.nc"
        for file_id in ids_request
    ]
    return filenames
5591948ce2445399d06da6c84f3fe1f6b8b4b128
703,326
def create_stream(data_type, transaction_id):
    """
    Construct a 'createStream' message to issue a new stream on which data
    can travel through.

    :param data_type: int the RTMP datatype.
    :param transaction_id: int the transaction id in which the message will
        be sent on.
    """
    msg = {'msg': data_type,
           'command': [u'createStream', transaction_id, None]}
    return msg
6ceb6f259c590bdb21589c57ba053fad5c7e1851
703,327
def _version_string_to_tuple(version_string):
    """Convert a version_string to a tuple (major, minor, patch) of integers."""
    return tuple(int(n) for n in version_string.split('.'))
d2fe2a3d9f6f23d1d80c2808436386c18893828f
703,328
from typing import Any


def is_action(value: Any) -> bool:
    """Returns ``True`` if the value is an action."""
    return isinstance(value, dict) and "action" in value
46c691c7afd221c0f77869428535f6b943332905
703,330
def name_to_components(name):
    """Converts a name to a list of components.

    Arguments:
        name - Name in the format /name1=value1/name2=value2/..

    Returns:
        list of (name, value) tuples
    """
    ret = []
    components = [x for x in name.split('/') if x]
    components = [x.split('=') for x in components]
    components = [x for x in components if len(x) == 2]
    for key, value in components:
        ret.append((key, value))
    return ret
5683e3c4fdce53b53431484a46cd23c8959d20a2
703,334
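Example parse with name_to_components; pieces without an '=' are silently dropped:

assert name_to_components('/CN=example/O=Acme') == [('CN', 'example'), ('O', 'Acme')]
assert name_to_components('/garbage/CN=ok') == [('CN', 'ok')]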
import re


def fixupAnchor(anchor):
    """Miscellaneous fixes to the anchors before I start processing."""

    # This one issue was annoying
    if anchor.get("title", None) == "'@import'":
        anchor["title"] = "@import"

    # css3-tables has this a bunch, for some strange reason
    if anchor.get("uri", "").startswith("??"):
        anchor["uri"] = anchor["uri"][2:]

    # If any smart quotes crept in, replace them with ASCII.
    linkingTexts = anchor.get("linking_text", [anchor.get("title")])
    for i, t in enumerate(linkingTexts):
        if t is None:
            continue
        if "’" in t or "‘" in t:
            t = re.sub(r"‘|’", "'", t)
            linkingTexts[i] = t
        if "“" in t or "”" in t:
            t = re.sub(r"“|”", '"', t)
            linkingTexts[i] = t
    anchor["linking_text"] = linkingTexts

    # Normalize whitespace to a single space
    for k, v in list(anchor.items()):
        if isinstance(v, str):
            anchor[k] = re.sub(r"\s+", " ", v.strip())
        elif isinstance(v, list):
            for k1, v1 in enumerate(v):
                if isinstance(v1, str):
                    anchor[k][k1] = re.sub(r"\s+", " ", v1.strip())

    return anchor
ba352cc5b18f82000be3943cf03db8ebcb41ddff
703,336
def has_trailing_character_return(str_multiline: str) -> bool:
    """
    >>> has_trailing_character_return('jhgjh\\n')
    True
    >>> has_trailing_character_return('jhgjh\\ntestt')
    False
    """
    if len(str_multiline) and str_multiline[-1] == '\n':
        preserve_trailing_linefeed = True
    else:
        preserve_trailing_linefeed = False
    return preserve_trailing_linefeed
21e6a7c9bb76e37ee05ed61b2012ec3a92413540
703,338
import random
import string


def get_random_file_name(length: int) -> str:
    """Returns a random file name.

    File name consists of lowercase letters, uppercase letters, and digits.

    :param length: File name length.
    """
    return ''.join(
        random.choice(string.ascii_lowercase + string.digits + string.ascii_uppercase)
        for _ in range(length)
    )
c6a7b2f58bc6d2eb457cee2c01222757c50f7eb9
703,348
def _iterations_implicit_bwd(res, gr):
    """Runs Sinkhorn in backward mode, using implicit differentiation.

    Args:
        res: residual data sent from fwd pass, used for computations below.
            In this case consists in the output itself, as well as inputs
            against which we wish to differentiate.
        gr: gradients w.r.t outputs of fwd pass, here w.r.t size f, g,
            errors. Note that differentiability w.r.t. errors is not
            handled, and only f, g is considered.

    Returns:
        a tuple of gradients: PyTree for geom, one jnp.ndarray for each of
        a and b.
    """
    f, g, ot_prob, solver = res
    gr = gr[:2]
    return (
        *solver.implicit_diff.gradient(ot_prob, f, g, solver.lse_mode, gr),
        None, None)
8617b6bd8cab2535409e863dae31a928f4de81db
703,353
import torch


def huber_loss_temporal(dvf):
    """
    Calculate approximated temporal Huber loss.

    Args:
        dvf: (Tensor of shape (N, 2, H, W)) displacement vector field estimated

    Returns:
        loss: (Scalar) huber loss temporal
    """
    eps = 1e-8  # numerical stability

    # magnitude of the dvf
    dvf_norm = torch.norm(dvf, dim=1)  # (N, H, W)

    # temporal derivatives, 1st order
    dvf_norm_dt = dvf_norm[1:, :, :] - dvf_norm[:-1, :, :]
    loss = (dvf_norm_dt.pow(2) + eps).sum().sqrt()
    return loss
12329846e15c18ff9d59aee2f27377ce38eb8208
703,354
def _get_valid_filename(string):
    """Generate a valid filename from a string.

    Strips all characters which are not alphanumeric, a period (.), dash (-),
    underscore (_) or space.

    Based on https://stackoverflow.com/a/295146/4798943

    Args:
        string (str): String file name to process.

    Returns:
        str: Generated file name.
    """
    return "".join(c for c in string if (c.isalnum() or c in "._- "))
93777a5458c00a0a751f77953d718080cf51088e
703,362
def parse_output(filename):
    """
    Parses the output of a test run. For each run of the test, the program
    should print the following:

        ## ID: result: [OK|FAIL]
        ## ID: cycles: N_CYCLES
        ## ID: instructions: N_INSTR
        ## ID: Key: Value

    Multiple runs are allowed. Make sure that you pipe the execution into a
    file, whose name is then passed into this function.

    Example:

        os.system("make clean all run > result.out")
        parsed = parse_output("result.out")

    This function returns a dictionary in the form:

        {"1": {"result": True, "cycles": "215", "instructions": "201",
               "ipc": "0.935"}, ...}
    """
    parsed = {}
    with open(filename, "r") as _f:
        for line in _f.readlines():
            if not line.startswith("## "):
                continue
            line = line.lstrip("# ")
            parts = [p.strip() for p in line.split(":")]
            assert len(parts) == 3, line
            if parts[0] not in parsed:
                parsed[parts[0]] = {}
            if parts[1] == "result":
                parsed[parts[0]]["result"] = parts[2] == "OK"
            else:
                parsed[parts[0]][parts[1].lower()] = parts[2]
    # add IPC to the output
    for case in parsed.values():
        if "cycles" in case and "instructions" in case:
            case["ipc"] = "{:.3f}".format(int(case["instructions"]) / int(case["cycles"]))
    return parsed
e24e961e5222d952e79369d22365723919cc3bfa
703,363
from typing import Any
from typing import List


def ensure_list(data: Any) -> List:
    """Ensure input is a list.

    Parameters
    ----------
    data: object

    Returns
    -------
    list
    """
    if data is None:
        return []
    if not isinstance(data, (list, tuple)):
        return [data]
    return list(data)
58016feaf49d63255944fa615e7e2e1dac6dcc76
703,372
def init_split(df, featname, init_bins=100):
    """
    Split the `featname` feature of `df` and return the interior cut points.

    To guarantee that every value has a matching interval, the midpoint
    between each pair of adjacent unique values is used as a cut point.

    Note that this is not equal-frequency binning or any other common
    method; it simply finds the candidate cut points and then merges them
    down to the requested number of bins.

    Also note that the cut points say nothing about whether intervals are
    closed; that is up to the caller. This function only controls the number
    of bins. Any surplus goes into the last bin: e.g. splitting 101 values
    into 100 bins leaves two samples in the last bin.

    Parameters
    ----------
    df: DataFrame
        Input dataframe.
    featname: str
        Feature (column) name.
    init_bins: int
        Number of bins to split into.

    Returns
    -------
    Ascending list of cut points, e.g. [1, 5, 9, 18].
    """
    # Unique values, sorted ascending
    list_unique_vals_order = sorted(list(set(df[featname])))
    # Midpoints between adjacent values; note the loop runs to len - 1
    list_median_vals = []
    for i in range(len(list_unique_vals_order) - 1):
        list_median_vals.append((list_unique_vals_order[i] + list_unique_vals_order[i + 1]) / 2)
    # Initial number of candidate cut points
    cnt_unique_vals = len(list_median_vals)
    # If the number of candidates is at most init_bins, return them directly.
    # Otherwise take every cnt_perbin-th candidate from the start, so the
    # remainder ends up in the last bin.
    if cnt_unique_vals <= init_bins:
        return list_median_vals
    else:
        # Candidates per bin; note the integer division
        cnt_perbin = cnt_unique_vals // init_bins
        # Keep init_bins - 1 interior cut points
        list_median_vals = [list_median_vals[i * cnt_perbin] for i in range(init_bins - 1)]
        return list_median_vals
172f48bb019fd4cf4941b2ff0fefcb24709fe7a4
703,375
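A small pandas sketch of init_split, assuming a toy column with unique values 1..5 (so the candidate cut points are the four midpoints):

import pandas as pd

df = pd.DataFrame({'score': [1, 2, 3, 4, 5]})
assert init_split(df, 'score', init_bins=10) == [1.5, 2.5, 3.5, 4.5]  # fewer candidates than bins
assert init_split(df, 'score', init_bins=2) == [1.5]  # every 2nd candidate; remainder in last bin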
from typing import OrderedDict


def filter_excluded_fields(fields, Meta, exclude_dump_only):
    """Filter fields that should be ignored in the OpenAPI spec.

    :param dict fields: A dictionary of field name / field object pairs
    :param Meta: the schema's Meta class
    :param bool exclude_dump_only: whether to filter fields in Meta.dump_only
    """
    exclude = getattr(Meta, "exclude", [])
    if exclude_dump_only:
        exclude += getattr(Meta, "dump_only", [])

    filtered_fields = OrderedDict(
        (key, value) for key, value in fields.items() if key not in exclude
    )
    return filtered_fields
a39a7665052cde0ba9f694696a57f2b1f6ca0603
703,378
def findNearestNeighbourPixel(img, seg, i, j, segSize, fourConnected):
    """
    For the (i, j) pixel, choose which of the neighbouring pixels is the
    most similar, spectrally.

    Returns tuple (ii, jj) of the row and column of the most spectrally
    similar neighbour, which is also in a clump of size > 1. If none is
    found, return (-1, -1).
    """
    (nBands, nRows, nCols) = img.shape
    minDsqr = -1
    ii = jj = -1
    # Cope with image edges
    (iiiStrt, iiiEnd) = (max(i - 1, 0), min(i + 1, nRows - 1))
    (jjjStrt, jjjEnd) = (max(j - 1, 0), min(j + 1, nCols - 1))

    for iii in range(iiiStrt, iiiEnd + 1):
        for jjj in range(jjjStrt, jjjEnd + 1):
            connected = ((not fourConnected) or ((iii == i) or (jjj == j)))
            if connected:
                segNbr = seg[iii, jjj]
                if segSize[segNbr] > 1:
                    # Euclidean distance in spectral space. Note that because
                    # we are only interested in the order, we don't actually
                    # need to do the sqrt (which is expensive)
                    dSqr = ((img[:, i, j] - img[:, iii, jjj]) ** 2).sum()
                    if minDsqr < 0 or dSqr < minDsqr:
                        minDsqr = dSqr
                        ii = iii
                        jj = jjj

    return (ii, jj)
e205652d7b39c922a203162f3cdc58672347b938
703,379
from typing import List


def readFile(filename: str) -> List[str]:
    """Reads a file and returns a list of the data."""
    try:
        with open(filename, "r") as fp:
            return fp.readlines()
    except OSError as exc:
        raise Exception(f"Failed to open {filename}") from exc
a5817ab563b83d5cf4999290d10a14a0b68954b6
703,381
def get_trunc_hour_time(obstime):
    """Truncate obstime (seconds since the epoch) to the nearest hour."""
    return (int(obstime) // 3600) * 3600
8379aafc76c1987f537ea294c7def73ad32f1b23
703,384
import re


def normalize_package_name(python_package_name):
    """
    Normalize Python package name to be used as Debian package name.

    :param python_package_name: The name of a Python package as found on
        PyPI (a string).
    :returns: The normalized name (a string).

    >>> from py2deb import normalize_package_name
    >>> normalize_package_name('MySQL-python')
    'mysql-python'
    >>> normalize_package_name('simple_json')
    'simple-json'
    """
    return re.sub('[^a-z0-9]+', '-', python_package_name.lower()).strip('-')
47a850c601e64b570470a339769742d8f3732e28
703,385
def url_join(*args):
    """Combine URL parts to get the full endpoint address."""
    return '/'.join(arg.strip('/') for arg in args)
2b8409910186d3058c2e49bbf6f974a7cfceeffb
703,389
def is_pairwise_disjoint(sets):
    """
    This function will determine if a collection of sets is pairwise
    disjoint.

    Args:
        sets (List[set]): A collection of sets
    """
    all_objects = set()
    for collection in sets:
        for x in collection:
            if x in all_objects:
                return False
            all_objects.add(x)
    return True
4d5c434b2db2cb167f51aa4c04534a9b12239547
703,391
def format_timedelta(days: int = 0, hours: int = 0, minutes: int = 0,
                     seconds: int = 0) -> str:
    """Returns a simplified string representation of the given timedelta."""
    s = '' if days == 0 else f'{days:d}d'
    if hours > 0:
        if len(s) > 0:
            s += ' '
        s += f'{hours:d}h'
    if minutes > 0:
        if len(s) > 0:
            s += ' '
        s += f'{minutes:d}min'
    if seconds > 0 or len(s) == 0:
        if len(s) > 0:
            s += ' '
        s += f'{seconds:d}sec'
    return s
6414e2be5a01f178d6515ab6f21ea7c5ab4d5004
703,402
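Sample outputs of format_timedelta, including the all-zero fallback:

assert format_timedelta(days=1, minutes=5) == '1d 5min'
assert format_timedelta(hours=2, seconds=30) == '2h 30sec'
assert format_timedelta() == '0sec'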
def accuracy(letters, target_string):
    """
    Compare the prediction to the correct answer.

    Args:
        letters (np.array): (num_chars, )
        target_string (str)

    Return:
        float: accuracy.
    """
    count = 0
    assert len(letters) == len(target_string)
    for i in range(len(target_string)):
        if letters[i] == target_string[i]:
            count += 1
    return count / len(target_string)
5898a086997d3b9ff9f9bcf84b747dd553a0e4cb
703,404
def relu(attrs, inputs, proto_obj):
    """Computes rectified linear function."""
    return 'relu', attrs, inputs
d45c7f517c2cef57206db56b1ed7402127e72a01
703,407
import random
import string


def random_string(length=16):
    """Generate a random string of the given length."""
    result = ""
    while len(result) < length:
        result += random.choice(string.ascii_letters + string.digits)
    return result
04bf92658ce8d9535aa91c751c5d9f1746827eaf
703,412
def numpy_unpad(x, pad_width):
    """Unpad an array.

    Args:
        x (numpy.ndarray): array to unpad
        pad_width (tuple): padding

    Returns:
        numpy.ndarray
    """
    slices = []
    for c in pad_width:
        e = None if c[1] == 0 else -c[1]
        slices.append(slice(c[0], e))
    return x[tuple(slices)]
31f41a7a741d7efa870670c95a8acf8be365975a
703,416
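numpy_unpad is the inverse of numpy.pad for a matching pad_width, as this round trip shows:

import numpy as np

a = np.arange(6).reshape(2, 3)
padded = np.pad(a, ((1, 2), (0, 1)))
assert np.array_equal(numpy_unpad(padded, ((1, 2), (0, 1))), a)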
def validate_spec(index, spec):
    """
    Validate the value for a parameter specialization.

    This validator ensures that the value is hashable and not None.

    Parameters
    ----------
    index : int
        The integer index of the parameter being specialized.
    spec : object
        The parameter specialization.

    Returns
    -------
    result : object
        The validated specialization object.
    """
    if spec is None:
        msg = "cannot specialize template parameter %d with None"
        raise TypeError(msg % index)
    try:
        hash(spec)
    except TypeError:
        msg = "template parameter %d has unhashable type: '%s'"
        raise TypeError(msg % (index, type(spec).__name__))
    return spec
3612d927ef7e61613e0991ffc04ea76555d1b115
703,418
def _replace_suffix(string, old, new):
    """Returns a string with an old suffix replaced by a new suffix."""
    # A conditional expression avoids the and/or idiom, which misfires
    # whenever the replaced result is falsy (e.g. an empty string).
    return string[:-len(old)] + new if string.endswith(old) else string
4e487f62126b130d973f249cd34abc5a75df3eaa
703,422
def is_empty_json_response_from_s3(context):
    """Check if the JSON response from S3 is empty (but not None)."""
    return context.s3_data == {}
6ec3a41646de74d82f3786e772228da55d91b63a
703,423
def mock_validator_execute_validator(*args, **kwargs):
    """Mock method to just return the built command line without executing it."""
    cls = args[0]
    command = args[1]
    return command
d03cc70f1f0a5bd59e7a116e3774b99aa8db5a03
703,424
import logging


def _set_logger(
    logger_file_path: str,
    logger_name: str = "default_logger",
    write_to_console: bool = True,
) -> logging.Logger:
    """Set logger to log to the given path.

    Modified from https://docs.python.org/3/howto/logging-cookbook.html

    Args:
        logger_file_path (str): Filepath to write to
        logger_name (str, optional): Name of the logger to use. Defaults
            to "default_logger"
        write_to_console (bool, optional): Should write the logs to console.
            Defaults to True

    Returns:
        logging.Logger: Logger object
    """
    logger = logging.getLogger(name=logger_name)
    logger.setLevel(level=logging.INFO)
    # create file handler which logs all the messages
    file_handler = logging.FileHandler(filename=logger_file_path)
    file_handler.setLevel(level=logging.INFO)
    # create formatter and add it to the handlers
    formatter = logging.Formatter(fmt="%(message)s")
    file_handler.setFormatter(fmt=formatter)
    # add the handlers to the logger
    logger.addHandler(hdlr=file_handler)

    if write_to_console:
        # create console handler with a higher log level
        stream_handler = logging.StreamHandler()
        stream_handler.setLevel(level=logging.INFO)
        # add formatter to the handlers
        stream_handler.setFormatter(fmt=formatter)
        # add the handlers to the logger
        logger.addHandler(hdlr=stream_handler)

    return logger
338999fbcd34367f1dc4140143688965663bf486
703,426
import torch


def dagger(x: torch.Tensor) -> torch.Tensor:
    """Conjugate transpose of a batch of matrices.

    Matrix dimensions are assumed to be the final two, with all preceding
    dimensions batched over.
    """
    return x.conj().transpose(-2, -1)
672d26333182803b18f3d1d10e49ef393e216f5f
703,428
def find_left_anchor_index(fragment_info, fragments):
    """
    Use the fragment information to find which fragment is the left anchor.

    :param fragment_info: [list[dict]] the list of fragment information
    :param fragments: [list] the list of fragments being searched
    :return: left_anchor_index: [int] the index location in the fragment
        list for the left anchor fragment
    """
    for info in fragment_info:
        if len(info['left_matches']) == 0:
            left_anchor_index = fragments.index(info['anchor'])
            return left_anchor_index
502473f7fee00a5ccd1ed3d6cf5c938e8c4d2041
703,430
def get_all_preconditions(action_set):
    """Returns a set of all preconditions for the actions in the given set."""
    all_precons = set()
    for a in action_set:
        all_precons.update(a.precondition)
    return all_precons
1697c2d013b07bbfc24ca6273d16523eb1d83acf
703,431
def f_line(x, m, b):
    """
    Line fit with equation y = mx + b.

    Parameters
    ----------
    x : array
        x values
    m : float
        slope
    b : float
        y-intercept

    Returns
    -------
    y : array
        y values
    """
    y = m * x + b
    return y
daf437461c2e8723a824e4083794d1f6ea6b368b
703,432
def getDistance(sensor):
    """
    Return the distance of an obstacle for a sensor.

    The value returned by the getValue() method of the distance sensors
    corresponds to a physical value (here we have a sonar, so it is the
    strength of the sonar ray). This function makes a conversion to a
    distance value in meters.
    """
    return ((1000 - sensor.getValue()) / 1000) * 5
d00b1d201fb59939d2b8a2e57bfa35588780b6d5
703,433