content
stringlengths
39
14.9k
sha1
stringlengths
40
40
id
int64
0
710k
import win32api, win32process
from typing import Optional


def _set_win_process_priority() -> Optional[bool]:
    """Raise the current process's priority class to reduce benchmark noise.

    Microbenchmarks are very short and prone to interference from other code
    running on the same machine; elevating the priority class while running
    them makes results more accurate and more consistent between runs.

    Returns
    -------
    success : bool, optional
        Whether setting the priority class succeeded. ``None`` means the
        priority was already at (or above) the target and was left alone.
    """
    # Pseudo-handle for the current process; because it isn't a real handle,
    # it doesn't need to be cleaned up.
    curr_proc_hnd = win32api.GetCurrentProcess()

    # ABOVE_NORMAL_PRIORITY_CLASS should be good enough to reduce general
    # noise; HIGH_PRIORITY_CLASS and above can make the system unresponsive,
    # so avoid them unless needed (could be exposed via a 'strong_hint'
    # bool parameter later).
    target_priority_class: int = win32process.ABOVE_NORMAL_PRIORITY_CLASS

    try:
        # If the current priority is already at or above the target, don't
        # touch it -- we never want to lower it.
        current_priority_class = win32process.GetPriorityClass(curr_proc_hnd)
        if current_priority_class >= target_priority_class:
            return None
        # Can fail if the PROCESS_SET_INFORMATION right has not been granted.
        # https://docs.microsoft.com/en-us/windows/win32/api/processthreadsapi/nf-processthreadsapi-setpriorityclass
        return win32process.SetPriorityClass(curr_proc_hnd, target_priority_class)
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate instead of being swallowed.
        return False
c69e2ffcf1de40507f77113ea9d4a15730ed3f1a
17,960
def linear_interp(x, in_min, in_max, out_min, out_max):
    """Linearly map `x` from the range [in_min, in_max] onto [out_min, out_max]."""
    fraction = (x - in_min) / (in_max - in_min)
    return out_min + fraction * (out_max - out_min)
eff41353ee48e1c2b1372030fa5480ce8e3bb817
17,963
def new_list(a):
    """Round every number in `a` to two decimal places.

    :param a: iterable of numbers
    :return: new list of floats rounded to 2 decimals
    """
    return [float(f"{value:.2f}") for value in a]
f1f80ea44f58f0780f02e8df698dbeb81d7ac934
17,967
def z2g(r_geoid, g0, z):
    """Calculate gravitational acceleration at elevation `z`.

    Derived from atmlab's equivalent helper (pt2z.m):
    g = g0 * (r/(r+z))^2.

    :param r_geoid: surface radius at point [m]
    :param g0: surface gravitational acceleration at point [m/s^2]
    :param z: elevation [m]
    :returns: gravitational acceleration at the point [m/s^2]
    """
    ratio = r_geoid / (r_geoid + z)
    return g0 * ratio * ratio
c04080156670e137f56ba4dfc872530bbada4d27
17,969
def get_crawled_date(df):
    """Derive 'crawled_date' (the first 8 characters, YYYYMMDD) from the
    'scrape_id' field and store it on the frame."""
    df['crawled_date'] = df['scrape_id'].astype(str).str[:8]
    return df
af5242fa98de713eaa0bbd8684f797b5db563033
17,971
from typing import Union
from typing import Callable
from typing import List
from typing import Tuple


def make_cmd(
    to_fn_call: Union[str, Callable[[List[str]], str]],
    arity: int
) -> Tuple[str, int]:
    """Build the transpiled command for a stack operation.

    :param to_fn_call: either a format string (filled with the popped
        variable names, reversed) or a callable that receives that list of
        variable names and returns the value expression.
    :param arity: how many values the operation pops from the stack.
    :return: (transpiled command source, arity)
    """
    var_names = [f"x{n}" for n in range(arity, 0, -1)]

    if isinstance(to_fn_call, str):
        fn_call = to_fn_call.format(*var_names) if arity > 0 else to_fn_call
    else:
        fn_call = to_fn_call(var_names)

    if arity > 0:
        cmd = f"{', '.join(reversed(var_names))} = pop(vy_globals.stack, {arity});"
    else:
        cmd = ""
    cmd += f"res = {fn_call}; vy_globals.stack.append(res);"
    return cmd, arity
bf71c9cb40aee40df3cee1ee08e156bca62091ff
17,974
def num_divisors_from_fac(fac):
    """Number of divisors of the integer whose prime factorization is `fac`.

    :param fac: dict mapping prime -> exponent
    :return: count of divisors (product of exponent+1 over all primes)
    """
    total = 1
    for exponent in fac.values():
        total *= exponent + 1
    return total
776991357a5f198f156e74657b557cd889de06b3
17,976
import math


def cost(a, b, c, e, f, p_min, p):
    """Fuel cost for output `p` using "standard" parameters, including the
    valve-point loading effect term."""
    valve_point = abs(e * math.sin(f * (p_min - p)))
    return a + b * p + c * p ** 2 + valve_point
c0acc4e8820e5ce354cee411afc1a121c07757bc
17,979
def xgcd(x, y):
    """Extended Euclidean algorithm.

    Args:
        x (integer)
        y (integer)

    Returns:
        [g, a, b] where g is the greatest common divisor of the inputs and
        g == a*x + b*y for the original x and y.
    """
    prev_a, a = 1, 0
    prev_b, b = 0, 1
    while y:
        q, r = divmod(x, y)
        x, y = y, r
        prev_a, a = a, prev_a - q * a
        prev_b, b = b, prev_b - q * b
    return [x, prev_a, prev_b]
f7d326302042880f72b9e70b0ea66890b00a093d
17,982
def chunk(mylist, chunksize):
    """Split `mylist` into consecutive pieces of length `chunksize`.

    Args:
        mylist: array
        chunksize: int

    Returns:
        list of chunks of length `chunksize` (the last one may be shorter)
    """
    return [mylist[start:start + chunksize]
            for start in range(0, len(mylist), chunksize)]
69cc8aa812c989c63dae4c444ee132c54a861fa4
17,986
def is_rejected_with_high_vaf(vaf, status, vaf_th):
    """Check whether an SNV is rejected with VAF over the threshold.

    Args:
        vaf: VAF of the SNV.
        status: reject/pass status of the SNV.
        vaf_th: VAF threshold.

    Returns:
        True if the SNV is 'REJECT' and its VAF exceeds the threshold,
        False otherwise.
    """
    # Return the boolean expression directly instead of an if/else that
    # returns literal True/False.
    return vaf > vaf_th and status == 'REJECT'
9b048c9a2d562f50cbfc4af56d4c33189d95f00e
17,987
def icingByteFcnPRB(c):
    """Extract the probability field (low three bits) of an icing pixel.

    Args:
        c (str): single-character icing pixel.

    Returns:
        int: value of the probability portion of the pixel.
    """
    probability_mask = 0x07
    return ord(c) & probability_mask
a19d9896ed1b932e44629cb0eff7268ffe8c6fee
17,999
def replace_key_value(lookup, new_value, expected_dict):
    """Replace the value stored under `lookup` anywhere in `expected_dict`.

    Walks the dict recursively; when both the matched value and `new_value`
    are dicts they are merged (update) instead of replaced. Modifies
    `expected_dict` in place and returns it.
    """
    for key, current in expected_dict.items():
        if key == lookup:
            if isinstance(current, dict) and isinstance(new_value, dict):
                current.update(new_value)
            else:
                expected_dict[key] = new_value
        elif isinstance(current, dict):
            replace_key_value(lookup, new_value, current)
    return expected_dict
923cda5b8d2ba44749345d7d1adf907d4c5f3b23
18,002
from unittest.mock import Mock


def mock_group(test_state=None, n=0):
    """Mock a Tradfri group.

    Args:
        test_state: optional dict of state overrides merged over the
            defaults ({"state": False, "dimmer": 0}).
        n: index used to build the group name.

    Returns:
        Mock configured like a Tradfri group.
    """
    # Use None instead of a mutable `{}` default so calls can't share (and
    # accidentally mutate) a single dict object.
    state = {"state": False, "dimmer": 0, **(test_state or {})}
    group = Mock(member_ids=[], observe=Mock(), **state)
    # `name` is special-cased by Mock(), so it must be set after construction.
    group.name = "tradfri_group_{}".format(n)
    return group
76664f2350838dbc8170aa3eeed972a7f7eda304
18,003
def bend(msg):
    """Return the 14-bit pitch bend value of a pitch bend message."""
    lsb = msg[1] & 0x7F
    msb = msg[2] & 0x7F
    return (msb << 7) | lsb
e3e19567a92fccb609b85fed42245727221e72f1
18,006
def AND_nobias(x1: int, x2: int) -> int:
    """AND gate implemented as a perceptron without a bias term.

    Args:
        x1 (int): 1st input for AND
        x2 (int): 2nd input for AND

    Returns:
        int: 1 if the weighted sum exceeds the threshold, else 0
    """
    w1, w2, theta = 0.5, 0.5, 0.7
    weighted_sum = w1 * x1 + w2 * x2
    return 1 if weighted_sum > theta else 0
e58a125a9c21233b8de7a044e5c0acdfdb29d1fa
18,009
def filterReturnChar(string):
    """Strip every carriage-return ("\\r") character from the string.

    :param string: input text
    :return: the text with all "\\r" characters removed
    """
    return "".join(string.split("\r"))
fff1b39d8c837fc33e627d6ad2395eb05458517a
18,016
def count_logistic(x):
    """Return one iterate of the chaotic logistic map with r = 3.9."""
    r = 3.9
    return r * x * (1 - x)
476c4efb391652e25443685f2660b82721c04e94
18,018
import torch


def load_latent_avg(checkpoint_path):
    """Load the average latent vector used by the encoder.

    :param checkpoint_path: path to a torch checkpoint containing a
        "latent_avg" entry.
    :return: the stored latent average.
    """
    state = torch.load(checkpoint_path)
    return state["latent_avg"]
51ed354bd67ae78a6000dd5b60cab2e6a0cec413
18,020
def makov_payne_correction(defect_cell, geometry, e_r, mk_1_1):
    """Makov-Payne finite-size correction for a charged defect cell.

    :param defect_cell: Cell object of the defect cell calculation
        (its `charge` attribute is used).
    :param geometry: geometry of the host cell ('sc', 'fcc', 'bcc',
        'hcp' or 'other').
    :param e_r: relative permittivity.
    :param mk_1_1: value of the first Makov-Payne term for q = 1, e_r = 1.
    """
    shape_factors = {
        'sc': -0.369,
        'fcc': -0.343,
        'bcc': -0.342,
        'hcp': -0.478,
        'other': -1. / 3,
    }
    c_sh = shape_factors[geometry]
    scaling = 1. + c_sh * (1. - 1. / e_r)
    return scaling * defect_cell.charge ** 2 * mk_1_1 / e_r
670755010e0bc2af35eea43563aae807db1d05d5
18,023
def deslugify(slug):
    """Turn a slug into human speak, e.g. 'my-page' -> 'My Page'."""
    words = slug.split('-')
    return ' '.join(words).title()
d2a58a68d1759007068ca484146b2ca70fc88ce9
18,033
def unpack_question_dimid(dimension_id):
    """Decompose a dimension id into unit, lesson and question id.

    Returns:
        A tuple (unit_id, lesson_id, question_id). unit_id and question_id
        are strings; lesson_id is a string or None.
    """
    unit_id, lesson_id, question_id = dimension_id.split(':')
    # A missing lesson is serialized as the literal string 'None'.
    return unit_id, None if lesson_id == 'None' else lesson_id, question_id
4133ec1ce5cd986b64c9af096931b9f2bf8cb123
18,046
import time


def timestamp2time(timestamp):
    """Convert a 'yyyy.mm.dd.hh.mm.ss' timestamp to epoch seconds
    (local time) for comparisons."""
    parsed = time.strptime(timestamp, "%Y.%m.%d.%H.%M.%S")
    return time.mktime(parsed)
7a2e9a6b51de0c9a4ebd69e1e0050bab4af064f9
18,048
import six  # kept for backward compatibility with other importers; no longer used here


def sorted_files(pdict):
    """Return the file_summary entries sorted by duration, longest first.

    :param pdict: profile dict keyed by file name (the 'summary' key is
        skipped).
    :return: list of file_summary dicts in descending duration order.
    """
    # dict.items() behaves correctly on both Python 2 and 3, so the
    # six.iteritems() shim was unnecessary.
    summaries = [v['file_summary'] for k, v in pdict.items() if k != 'summary']
    return sorted(summaries, key=lambda f: f['duration'], reverse=True)
3135561bd68cdc0018c1b904f8a6b47dc4a96468
18,052
def str_set_of_candidates(candset, cand_names=None):
    """Nicely format a set of candidates.

    .. doctest::

        >>> print(str_set_of_candidates({0, 1, 3, 2}))
        {0, 1, 2, 3}
        >>> print(str_set_of_candidates({0, 3, 1}, cand_names="abcde"))
        {a, b, d}

    Parameters
    ----------
    candset : iterable of int
        An iterable of candidates.
    cand_names : list of str or str, optional
        Symbolic names for every candidate.

    Returns
    -------
    str
    """
    if cand_names is None:
        labels = [str(cand) for cand in candset]
    else:
        labels = [str(cand_names[cand]) for cand in candset]
    return "{%s}" % ", ".join(sorted(labels))
817592c964f8912585944a05e11606b4c4597721
18,063
def find_anagrams(word, candidates):
    """Return a list of `word`'s anagrams found in `candidates`.

    The comparison is case-insensitive, and any candidate equal to the word
    itself (ignoring case) is excluded.
    """
    target = sorted(word.lower())
    # Materialize a real list: the docstring promises a list, but the
    # original returned a lazy one-shot `filter` object.
    return [
        cand for cand in candidates
        if cand.lower() != word.lower() and sorted(cand.lower()) == target
    ]
4c016e6febd9c1d4144b4756d43b5f596c33ec4f
18,069
from typing import Tuple from typing import Any from typing import List def _convert_record_to_object(class_: type, record: Tuple[Any], field_names: List[str]) -> Any: """ Convert a given record fetched from an SQL instance to a Python Object of given class_. :param class_: Class type to convert the record to. :param record: Record to get data from. :param field_names: Field names of the class. :return: the created object. """ kwargs = dict(zip(field_names, record[1:])) field_types = {key: value.type for key, value in class_.__dataclass_fields__.items()} for key in kwargs: if field_types[key] == bytes: kwargs[key] = bytes(kwargs[key], encoding='utf-8') obj_id = record[0] obj = class_(**kwargs) setattr(obj, "obj_id", obj_id) return obj
3422fb51f959f1ead4f6af05bf750746358263bd
18,080
def make_invalid_varname_comment(varname: str):
    """Build a Stata comment flagging `varname` as invalid."""
    return '* Invalid STATA varname: ' + varname
e40dc6a8d15d81a9af3e25825f0ff424379c73ca
18,083
def get_skip_comments(events, skip_users=None):
    """Determine comment ids that should be ignored, either because the
    comment was deleted or because its author should be skipped.

    Args:
        events: a list of (event_type str, event_body dict, timestamp).
        skip_users: optional list of user logins to ignore.

    Returns:
        set of comment ids that were deleted or made by skipped users.
    """
    skip_users = skip_users or []
    comment_events = ('issue_comment', 'pull_request_review_comment')
    skip_comments = set()
    for event, body, _timestamp in events:
        if event not in comment_events:
            continue
        comment_id = body['comment']['id']
        if body.get('action') == 'deleted' or body['sender']['login'] in skip_users:
            skip_comments.add(comment_id)
    return skip_comments
30663624714104bc7b9aa0fd4da45f537b06420f
18,086
def encode_jump(input_string):
    """Encode `input_string` by swapping each digit with its "jump" partner.

    Digits map pairwise (0<->5, 1<->9, 2<->8, 3<->7, 4<->6); all other
    characters pass through unchanged.
    """
    # str.translate performs the per-character mapping in a single C-level
    # pass and leaves unmapped characters untouched; this also drops the
    # dead `output_string = ""` assignment from the original.
    jump_table = str.maketrans("0123456789", "5987604321")
    return input_string.translate(jump_table)
b316ffab520ce2cdbc97e9711b897e85081a827e
18,087
def sluggify(text):
    """Create a file-system friendly string from the passed text.

    Keeps letters, digits and a small set of safe punctuation
    (space . , _ - =), strips trailing whitespace and truncates the result
    to 75 characters. Use this to make file names from arbitrary text,
    like titles.

    :param text: arbitrary text
    :return: sanitized string suitable for a file name
    """
    if not text:
        return ''
    safe_punctuation = {' ', '.', ',', '_', '-', '='}
    kept = ''.join(ch for ch in text
                   if ch.isalpha() or ch.isdigit() or ch in safe_punctuation)
    return kept.rstrip()[:75]
a597f1625ee215314a77832b5f0fce2b7884c30b
18,089
def UnZeroMatrix(matrix):
    """Replace every 0.0 entry in `matrix` with the lowest observed
    non-zero value.

    Assumes the first row and first column of the matrix are descriptors
    (labels); only the remaining cells are numeric data.

    :param matrix: list of rows; matrix[0] is the header row and each data
        row starts with a descriptor.
    :return: new matrix whose data cells are floats, with zeros replaced by
        the minimum non-zero value.
    """
    # Collect every non-zero data cell across ALL data rows. The original
    # seeded the minimum from the entire first data row *including zeros*,
    # so a zero there made minNonZero 0.0 and the replacement a no-op.
    non_zero_values = [
        float(el)
        for line in matrix[1:]
        for el in line[1:]
        if float(el) != 0.0
    ]
    minNonZero = min(non_zero_values)

    newMat = [matrix[0]]
    for line in matrix[1:]:
        curVec = [line[0]]
        for el in line[1:]:
            value = float(el)
            curVec.append(minNonZero if value == 0.0 else value)
        newMat.append(curVec)
    return newMat
8ede8d57e009aa8248f73d70fe48d4c4ea80baa8
18,090
import sqlite3


def create_connection(database_name):
    """Open and return a sqlite3 connection to `database_name`."""
    connection = sqlite3.connect(database_name)
    return connection
754d569fef76bc9b498efab176f33ed35fc60f6c
18,104
def decorate_table(table_text, convert_fun, d_cols=" & ", d_rows="\\\\\n"):
    """Transforms text of the table by applying converter function to each element of this table.

    :param table_text: (str) text of the table.
    :param convert_fun: (str => str) a function to be applied to each element of the table.
    :param d_cols: (str) delimiter between columns.
    :param d_rows: (str) delimiter between rows.
    :return: (str) text of the converted table.
    """
    def process_cell(s):
        # Cells are converted and coerced to str for concatenation.
        return str(convert_fun(s))
    if d_cols not in table_text:
        return table_text  # delimiter was not present
    splitted = table_text.split(d_cols)
    new_text = ""
    for i in range(0, len(splitted)):
        s = splitted[i]
        last_in_row = d_rows in s
        if last_in_row:
            # This chunk spans a row boundary: the part before d_rows is the
            # last cell of one row, the part after is the first cell of the
            # next row (converted only when non-empty, e.g. not at the end
            # of the table).
            two_elems = s.split(d_rows)
            decorated = process_cell(two_elems[0]) + d_rows
            if len(two_elems) > 1 and two_elems[1] != '':
                decorated += process_cell(two_elems[1])
        else:
            # NOTE(review): this branch calls convert_fun(s) directly rather
            # than process_cell(s), so a non-str converter result is not
            # coerced here — presumably convert_fun always returns str;
            # confirm with callers.
            decorated = convert_fun(s)
        new_text += decorated
        if i < len(splitted)-1:
            # Re-insert the column delimiter consumed by split().
            new_text += d_cols
    return new_text
55788a8ffb853702b81b38dc446ca9951371f9c9
18,105
def _qualname(cls): """ Returns a fully qualified name of the class, to avoid name collisions. """ return u'.'.join([cls.__module__, cls.__name__])
530fb5a7e9231702850c4bc8be09e9ca7e9dd8f5
18,107
def _check_name_should_break(name): """ Checks whether the passed `name` is type `str`. Used inside of ``check_name`` to check whether the given variable is usable, so we should stop checking other alternative cases. Parameters ---------- name : `Any` Returns ------- should_break : `bool` If non empty `str` is received returns `True`, meanwhile if `None` or empty `str` is received `False`. Raises ------ TypeError If `name` was not passed as `None` or type `str`. """ if (name is None): return False if type(name) is not str: raise TypeError(f'`name` should be `None` or type `str`, got `{name.__class__.__name__}`.') if name: return True return False
4e6981fa840b89bf69a1a0e6c6401b1e2387e17d
18,115
def normalize_ns(namespaces: str) -> str:
    """Normalize url names by collapsing repeated ':' separators.

    :param namespaces: the namespace string to normalize
    :return: the normalized version of the url path name
    """
    parts = (piece for piece in namespaces.split(':') if piece)
    return ':'.join(parts)
3be2e6cf1615d9610ec6695966c9c32f4b531ea3
18,119
def get_param_dict(df, column_name, cast_as_type):
    """Create a {project: param_value} dictionary to load into Pyomo.

    :param df: the project-params dataframe
    :param column_name: string, column name of the parameter to look for
    :param cast_as_type: the type for the parameter
    :return: dictionary {project: param_value}
    """
    param_dict = {}
    for project, raw_value in zip(df["project"], df[column_name]):
        # '.' marks "not specified": skip it so Pyomo falls back to the
        # parameter's default (or errors if there is none).
        if raw_value != ".":
            param_dict[project] = cast_as_type(raw_value)
    return param_dict
b2b0dd2626e9cceb1dec18e14467178e42f1fd06
18,121
def get_payer_channel(channelidentifiers_to_channels, transfer_pair):
    """Return the payer channel of a given transfer pair."""
    channel_id = transfer_pair.payer_transfer.balance_proof.channel_address
    # A missing identifier indicates an internal inconsistency.
    assert channel_id in channelidentifiers_to_channels
    return channelidentifiers_to_channels[channel_id]
42a0f99a65888dbe8b19d248f5d78d2b8b5f0fd1
18,123
def forward2reverse(dna):
    """Return the reverse complement of an oligonucleotide (k-mer).

    All ambiguous IUPAC bases are treated as Ns.
    """
    complement = {"A": "T", "T": "A", "C": "G", "G": "C", "N": "N",
                  "K": "N", "M": "N", "R": "N", "Y": "N", "S": "N",
                  "W": "N", "B": "N", "V": "N", "H": "N", "D": "N",
                  "X": "N"}
    return ''.join(complement[base] for base in reversed(dna))
81d9e1eeebc2f446ada6e88be6f4332510a8e5e4
18,124
from typing import List
import pathlib


def parts_to_path(parts: List[str]) -> str:
    """Convert a list of path parts into a path string.

    e.g. parts_to_path(["home", "rafy"]) -> 'home/rafy'

    Args:
        parts (List[str]): path parts

    Returns:
        str: joined path parts
    """
    return str(pathlib.Path(parts[0]).joinpath(*parts[1:]))
71fcc68b91bbfa868fec8f0e70dbc8434417664f
18,131
import json


def my_json_dumps(data):
    """Beautify `data` as JSON (2-space indent, sorted keys).

    Arguments:
        data -- object to serialize

    Returns:
        str -- beautified JSON text
    """
    formatted = json.dumps(data, sort_keys=True, indent=2)
    return formatted
cc9b9424b79642d3dbd6eaec2dd685ecaed3b239
18,133
def get_preprocess_filenames(pipelines, vocab_file, dataset_file=None):
    """Get the vocabulary (and optionally dataset) file paths to load for a
    set of preprocessing pipelines.

    Parameters
    ----------
    pipelines : list
        List of preprocessing pipeline names. Sorted in place.
    vocab_file : str
        Path of the base vocabulary file (ends in '.txt').
    dataset_file : str, optional
        Path of the base dataset file (ends in '.csv').

    Returns
    -------
    vocab_to_load : str, or (vocab_to_load, dataset_to_load) tuple when
        `dataset_file` is given; None when no pipelines are specified.
    """
    # Guard BEFORE sorting: the original called pipelines.sort() first,
    # which raised AttributeError when pipelines was None.
    if not pipelines:
        return None
    pipelines.sort()

    suffix = ''.join('_' + pipeline for pipeline in pipelines)
    vocab_to_load = vocab_file[:-4] + suffix + '.txt'
    if dataset_file:
        dataset_to_load = dataset_file[:-4] + suffix + '.csv'
        return vocab_to_load, dataset_to_load
    return vocab_to_load
727eda55b1e2de37d5fd00569c4efa849e27e1a9
18,137
def _is_utf8(filepath): """ check if file is utf8 encoding """ with open(filepath, 'rb') as f: content = f.read() try: content_utf8 = content.decode('utf-8') except UnicodeDecodeError as e: return False return True
7e92aa296122368dd4aaa2af08b9d31ac0e674f9
18,140
import math


def get_cross_points(a, b, c, image_height):
    """Find the roots of x = a*y^2 + b*y + c that lie within the image.

    Solves a*y^2 + b*y + c = 0 and keeps only roots y with
    0 <= y < image_height.

    Parameters
    ----------
    a: the first coefficient of the quadratic equation (must be non-zero)
    b: the second coefficient of the quadratic equation
    c: the third coefficient of the quadratic equation
    image_height: the height of the image

    Return
    ----------
    (count, roots): count is 0, 1 or 2; roots is None, a single y value,
    or a (y1, y2) tuple accordingly.
    """
    assert a  # the equation must actually be quadratic
    d = b ** 2 - 4 * a * c
    if d < 0:
        return (0, None)

    if d == 0:
        roots = [-b / (2 * a)]
    else:
        sqrt_d = math.sqrt(d)
        roots = [(-b + sqrt_d) / (2 * a), (-b - sqrt_d) / (2 * a)]

    # Keep only roots inside the image.
    in_image = [y for y in roots if 0 <= y < image_height]

    if len(in_image) == 2:
        return (2, (in_image[0], in_image[1]))
    if len(in_image) == 1:
        # Bug fix: the original reported an in-range double root (d == 0)
        # with count 0 instead of 1.
        return (1, in_image[0])
    return (0, None)
d27c37195631083742c006f443e4d56e6bd21d64
18,143
def insert_nested_value(dictionary, path, value):
    """Walk `path` in `dictionary` (creating sub-dicts as needed) and insert
    `value` at the bottom.

    Modifies `dictionary` in place, and also returns `dictionary` as a
    convenience. The `path` list itself is left untouched — the original
    implementation consumed it with pop(0), surprising callers that reused
    the same path list.

    For example:

    >>> insert_nested_value({}, ['foo', 'bar'], 'baz')
    {'foo': {'bar': 'baz'}}
    """
    if not path:
        return dictionary
    node = dictionary
    # Descend to the parent of the final key, adding dict layers as needed.
    for key in path[:-1]:
        node = node.setdefault(key, {})
    node[path[-1]] = value
    return dictionary
1d2c66f2d4e05b2553bd7feb616e1ddc7c24936e
18,147
def tlvs_by_type(tlvs, tlv_type):
    """Return the TLVs whose `tlv_type` matches the requested type."""
    matches = []
    for tlv in tlvs:
        if tlv.tlv_type == tlv_type:
            matches.append(tlv)
    return matches
5adda249aef14dc5523dc0a8ec84bbe599a266f7
18,149
import pickle


def read_pickled_data(pickle_id):
    """Load pickled data based on an ID string.

    The file ``<pickle_id>.pickle`` is expected to start with a pickled
    item count followed by that many pickled items.

    Parameters
    ----------
    pickle_id : str
        ID string; ".pickle" is appended to form the file name.

    Returns
    -------
    data : list
        The unpickled items.
    """
    with open(pickle_id + ".pickle", "rb") as f:
        count = pickle.load(f)
        return [pickle.load(f) for _ in range(count)]
e7f851f968880087209abf89fb23523c2653b298
18,150
def get_mode_count(df):
    """Compute the mode of a one-column frame/series and its count.

    Args:
        df - pandas object with ONE column of values

    Returns:
        (df_mode, df_mode_count) for the most frequent value
    """
    counts = df.value_counts()
    # value_counts() sorts descending, so the first entry is the mode.
    return counts.index[0], counts.iloc[0]
8712b6a351c6afdb328e94a633652af86a1f4eba
18,159
import json


def get_settings(settings_path):
    """Open the JSON settings file at `settings_path` and return its
    parsed contents."""
    with open(settings_path) as fp:
        return json.load(fp)
a4e2cfc2c63ea2c3422f42c200d1d715338f0898
18,161
import requests


def get_user_key(api_key: str, username: str, password: str) -> str:
    """Log the user in and return their ``user_key``.

    The key can be cached, since it is only invalidated by the next (api)
    login.

    Arguments:
        api_key {str} -- the api_key of the application.
        username {str} -- the username of the user that wants to login.
        password {str} -- the password of the user that wants to login.

    Returns:
        user_key {str} -- the ``user_key`` of the user that logged in.

    Raises:
        ValueError: when Pastebin reports a bad API request (invalid
            api_dev_key, invalid login, inactive account, invalid POST
            parameters, or using GET instead of POST).
    """
    r = requests.post(
        "https://pastebin.com/api/api_login.php",
        data={
            "api_dev_key": api_key,
            "api_user_name": username,
            "api_user_password": password,
        },
    )
    try:
        r.raise_for_status()
    except requests.HTTPError as err:
        # Narrowed from a bare `except:`; surface Pastebin's error text
        # while keeping the original cause chained for debugging.
        raise ValueError(r.text) from err
    return r.text
92dd0cedfd5364378d5381b0a64844e7f789f62d
18,164
import toml


def readToml(filename):
    """Read a single TOML configuration file, or return an empty dict if it
    doesn't exist.

    :param filename: path of the TOML file.
    :return: parsed contents as a dict; an empty dict when the file is
        missing or unreadable.
    :raises RuntimeError: if the file exists but is not valid TOML.
    """
    try:
        with open(filename, encoding='utf-8') as fp:
            return toml.load(fp)
    except OSError:
        # A missing/unreadable config is not an error: fall back to defaults.
        return {}
    except toml.TomlDecodeError as err:
        # Re-raise with a friendlier message; `from None` suppresses the raw
        # decoder traceback.
        raise RuntimeError(
            f'could not read NICOS config file at {filename!r},'
            f' please make sure it is valid TOML: {err}') from None
b5d0761015cd1fbeb94bfb771a7ac30fb2c35d3d
18,165
def compute_gradient(X, Y, Theta):
    """Computes the cost gradient.

    gradient = (1/m) * X_transpose * (X*Theta - Y)

    :param X: m*n design matrix
    :param Y: m*1 target vector
    :param Theta: n*1 parameter vector
    :return: n*1 gradient vector

    NOTE(review): the `*` operators here must be matrix products, so the
    arguments are presumably ``numpy.matrix`` objects (with plain
    ``ndarray`` this would be element-wise) — confirm with callers.
    """
    (m, n) = X.shape
    return (1.0/m) * (X.T) * (X*Theta - Y)
6348e2099a8ae6e32f6150bcaf1124edadc1c798
18,167
def get_doc_id(element_tree):
    """Return the document ID of a SaltXML document.

    :param element_tree: an ``lxml.etree._ElementTree`` representing a
        complete SaltXML document.
    """
    # The document id is stored in a <labels name="id"> element's
    # valueString attribute.
    id_label = element_tree.xpath('labels[@name="id"]')[0]
    return id_label.attrib['valueString']
64e3e2abda9a0182866cc34b2f510a6c6dffe05b
18,168
def convertMinuteDecimalToDregrees(toconvert):
    """Convert a sequence of minute-decimal values to degrees
    (divide each by 60)."""
    return [float(value) / 60 for value in toconvert]
7c9f839ccc80f1d2a660ffc51fd05e91304e6683
18,171
def get_subtrees(tree_mem):
    """Break the tree-membership information into subtrees, grouped by size.

    Preparatory step for saving computational steps.

    Parameters
    ----------
    tree_mem : dictionary
        Contains the group information with the leaves as keys and the
        list of group ids (one per rank) as values.

    Returns
    -------
    subtree_sizes : dictionary
        Keys are subtree sizes; values are lists of
        (leaf_names, rank_id, group_id) tuples.
    """
    # Robustness: an empty membership table has no ranks and no subtrees
    # (the original raised IndexError here).
    if not tree_mem:
        return {}
    max_rank = len(next(iter(tree_mem.values())))

    subtree_sizes = dict()
    for rank in range(max_rank):
        # Invert the table for this rank: group id -> leaves in that group.
        # setdefault/append replaces the original's copy-the-list-and-
        # reassign dance with a single amortized O(1) step per leaf.
        groups = dict()
        for leaf, group_ids in tree_mem.items():
            groups.setdefault(group_ids[rank], []).append(leaf)
        # Record each group under the size of its subtree.
        for g_id, members in groups.items():
            subtree_sizes.setdefault(len(members), []).append((members, rank, g_id))
    return subtree_sizes
fdca6e8fb8c82b896748a2599ab5a25eac377108
18,175
def _deep_merge_dicts(defaults: dict, updates: dict) -> dict: """Merges two dicts recursively, with updates taking precendence. Note that this modifies defaults in place. """ for k, u in updates.items(): if k not in defaults: defaults[k] = u else: v = defaults[k] if isinstance(u, dict) and isinstance(v, dict): defaults[k] = _deep_merge_dicts(v, u) else: defaults[k] = u return defaults
90ea8c5e81998a51005338c0d67cb6a07a4291f2
18,178
import shutil


def test_cmtk_install() -> int:
    """Probe for a CMTK installation on the PATH.

    :return: 0 if individual CMTK binaries (e.g. ``warp``) are directly
        callable, 1 if they must be invoked through the ``cmtk`` wrapper
        script, -1 if no CMTK install was detected.
    """
    for command, result in (("warp", 0), ("cmtk", 1)):
        if shutil.which(command) is not None:
            return result
    return -1
1be574fc3b5f9f7bd41056591e5681057ae0ebd4
18,180
def compute_image_shape(width: int, height: int, fmt: str) -> tuple:
    """Compute the numpy array shape for a given image.

    The output shape is 2-dim for grayscale and 3-dim for color images:

    * ``(height, width)`` for FITS images with one grayscale channel
    * ``(height, width, 3)`` for JPG images with three RGB channels
    * ``(height, width, 4)`` for PNG images with four RGBA channels

    Parameters
    ----------
    width, height : int
        Width and height of the image
    fmt : {'fits', 'jpg', 'png'}
        Image format

    Returns
    -------
    shape : tuple
        Numpy array shape

    Raises
    ------
    ValueError
        For an unknown format.
    """
    channels = {'fits': None, 'jpg': 3, 'png': 4}
    if fmt not in channels:
        raise ValueError(f'Invalid format: {fmt}')
    depth = channels[fmt]
    return (height, width) if depth is None else (height, width, depth)
f154b3e97ef8f700517985a520104ed26b041a4a
18,181
def summarize_file(file_path):
    """Summarize the provided file with some basic measurements.

    Returns:
        A tuple (bytes, words, lines, max_line_width); max_line_width is
        the longest line length excluding the trailing newline. Returns
        None when `file_path` is None.
    """
    if file_path is None:
        return

    bytes_ = 0
    words = 0
    lines = 0
    max_line_width = 0
    with open(file_path) as file:
        for line in file:
            # Bug fixes vs the original: the line counter started at 1
            # (over-counting every file by one line), and the max width
            # included the trailing newline character.
            lines += 1
            width = len(line.rstrip('\n'))
            if width > max_line_width:
                max_line_width = width
            words += len(line.split())
            bytes_ += len(line.encode())
    return (bytes_, words, lines, max_line_width)
63b56a976f00cf86fc384abdc304b97690f32554
18,183
def compute_products(of_adjacent_digits, in_number):
    """Compute the digit products of all N-sized groups of adjacent digits
    in a predefined number.

    :param of_adjacent_digits: size N of each digit group.
    :param in_number: number whose digits are scanned.
    :return: list of products, one per adjacent-digit group.
    """
    digits = str(in_number)
    products = []
    for start in range(len(digits) - of_adjacent_digits + 1):
        group = digits[start:start + of_adjacent_digits]
        product = 1
        for digit in group:
            product *= int(digit)
        products.append(product)
    return products
56ef92514f2c24a707a3fd174d8bede0d0454ef9
18,184
def get_cart_location(env, screen_width):
    """Return the pixel position of the middle of the cart.

    :param env: CartPole-style environment exposing `x_threshold` and
        `state` attributes.
    :param screen_width: width of the rendered screen in pixels.
    :return: cart center as an int pixel x-coordinate.
    """
    world_width = 2 * env.x_threshold
    pixels_per_unit = screen_width / world_width
    return int(env.state[0] * pixels_per_unit + screen_width / 2.0)
2b245964e1ce8b70a7964766a13a14e7759e48bf
18,188
def is_happy(n):
    """Determine whether the number `n` is a "happy number".

    Int -> Bool

    Repeatedly replaces n with the sum of the squares of its digits
    (split via str()); happy numbers reach 1, unhappy ones fall into a
    cycle, detected with a set of previously seen values.
    """
    seen = set()
    while n != 1:
        if n in seen:
            return False
        seen.add(n)
        n = sum(int(digit) ** 2 for digit in str(n))
    return True
30068b5e6e6f0a4aa1fbcb91fc062b663be5a0c1
18,191
import math


def cosine_lr_scheduler(
    lr: float,
    n_warmup: int = 0,
    warmup_init_lr: float = -1,
    min_lr: float = 0.0,
    t_mult: float = 1.0,
    lr_period_updates: float = -1,
    lr_shrink: float = 0.1,
    max_update: int = -1,
):
    """Cosine annealing learning rate scheduler with warmup and step decay for LambdaLR.

    Based on fairseq.optim.lr_scheduler.cosine_lr_scheduler.

    Args:
        lr (float): (Maximum) learning rate.
        n_warmup (int): Number of warmup steps with a linear lr increase.
            Default is 0.
        warmup_init_lr (float): Initial learning rate during warmup phase.
            Default is `lr`.
        min_lr (float): Minimum learning rate during cosine annealing.
            Default is 0.
        t_mult (float): Factor to grow the length of each period. Default is 1.
        lr_period_updates (float): Initial number of updates per period.
        lr_shrink (float): Shrink factor for each period. Default 0.1.
        max_update (int): Number of maximum updates (epochs).
            If specified, will result in 1 period over all updates.

    Returns:
        A ``step(epoch) -> float`` closure returning the lr *multiplier*
        (the scheduled lr divided by ``lr``), as expected by
        ``torch.optim.lr_scheduler.LambdaLR``.
    """
    max_lr_base = lr
    min_lr_base = min_lr
    warmup_end_lr = max_lr_base
    # A negative warmup_init_lr means "unset"; fall back to min_lr.
    warmup_init_lr = min_lr if warmup_init_lr < 0 else warmup_init_lr

    # Length (in post-warmup updates) of the first cosine period.
    period = lr_period_updates
    if period <= 0:
        assert max_update > 0, "Either lr_period_updates or max_update must be set."
        # Single period spanning all updates after warmup.
        period = max_update - n_warmup

    if n_warmup > 0:
        # Per-step linear lr increment during warmup.
        step_lr = (warmup_end_lr - warmup_init_lr) / n_warmup
    else:
        step_lr = 1

    lr_shrink_base = lr_shrink

    def step(epoch: int) -> float:
        # Warmup phase: linear ramp from warmup_init_lr up to lr.
        if epoch < n_warmup:
            return (warmup_init_lr + epoch * step_lr) / max_lr_base

        cur_updates = epoch - n_warmup

        if t_mult != 1:
            # Growing periods: cycle index i, cycle length t_i and position
            # t_cur inside the cycle follow from the geometric series
            # period * (1 + t_mult + t_mult^2 + ...).
            i = math.floor(math.log(1 - cur_updates / period * (1 - t_mult), t_mult))
            t_i = t_mult**i * period
            t_cur = cur_updates - (1 - t_mult**i) / (1 - t_mult) * period
        else:
            # Constant-length periods.
            i = math.floor(cur_updates / period)
            t_i = period
            t_cur = cur_updates - (period * i)

        # Shrink the lr envelope once per completed period.
        # (These assignments deliberately shadow the enclosing names.)
        lr_shrink = lr_shrink_base**i
        min_lr = min_lr_base * lr_shrink
        max_lr = max_lr_base * lr_shrink

        # Cosine interpolation between max_lr and min_lr, normalized by the
        # base lr so the result is a LambdaLR multiplier.
        return (
            min_lr + 0.5 * (max_lr - min_lr) * (1 + math.cos(math.pi * t_cur / t_i))
        ) / max_lr_base

    return step
7438511ad48cf30fa706e5c4fa4d234919a78234
18,196
def sanitize(string):
    """Replace every '-' with '_' so the result is a valid accumulo table name."""
    return '_'.join(string.split('-'))
2a86fff76b6d504be7981877612c4b1965d61e4e
18,200
def handle_node_attribute(node, tag_name, attribute_name):
    """Return attribute ``attribute_name`` of the first ``tag_name`` element
    found inside the given DOM node."""
    first_match = node.getElementsByTagName(tag_name).item(0)
    return first_match.getAttribute(attribute_name)
8bbde7ecf335ce37b1fc55e3472aa07af6b7806a
18,201
def select_trees(gdf, subplot):
    """Select the trees whose geometry intersects a subplot.

    Args:
        gdf: a geopandas dataframe
        subplot: a shapely box

    Returns:
        selected_trees: pandas dataframe of trees
    """
    intersects_mask = gdf.intersects(subplot)
    return gdf[intersects_mask]
e0c4acea2622c839fb4afe124aae0a33af012e0f
18,203
def get_pending_registration_ip_set(
    ip_from_dns_set, ip_from_target_group_set
):
    """Return the set of IPs that are pending registration.

    An IP is pending registration when it meets both conditions:
      1. it is currently in the DNS, and
      2. it has not been registered with the target group yet.

    :param ip_from_dns_set: a set of IPs that are in the DNS
    :param ip_from_target_group_set: a set of IPs currently registered with
        a target group
    """
    return ip_from_dns_set.difference(ip_from_target_group_set)
0bc0c23093bf9881421c5ef2b1307b099ed10384
18,206
def get_identical_attributes(features, exclude=None):
    """Return a dictionary of all key-value pairs that are identical for all
    |SegmentChains| in `features`

    Parameters
    ----------
    features : list
        list of |SegmentChains|

    exclude : set
        attributes to exclude from identity criteria

    Returns
    -------
    dict
        Dictionary of all key-value pairs that have identical values in all
        the `attr` dictionaries of all the features in `features`
    """
    exclude = set() if exclude is None else set(exclude)
    # Keys present in every feature's attr dict.
    common_keys = set(features[0].attr.keys())
    # features[0] is already the seed, so start intersecting from the rest.
    for feature in features[1:]:
        common_keys &= set(feature.attr.keys())
    common_keys -= exclude
    first = features[0].attr
    # Keep only keys whose value is the same in every feature.
    # (Was `all(...) == True` — the comparison was redundant.)
    return {
        key: first[key]
        for key in common_keys
        if all(feature.attr[key] == first[key] for feature in features)
    }
efe7e913c7bb5d4f69f50241a999fb85fdd72800
18,208
def compute_avg_over_multiple_runs(number_episodes, number_runs, y_all_reward, y_all_cum_reward, y_all_timesteps):
    """Compute the per-episode average of reward, cumulative reward and
    timesteps over multiple runs (different dates).

    Each ``y_all_*`` argument is indexed as ``[run][episode]``; the returned
    lists are indexed by episode.
    """
    avg_reward = []
    avg_cum_reward = []
    avg_timesteps = []
    for episode in range(number_episodes):
        total_reward = total_cum_reward = total_timesteps = 0
        for run in range(number_runs):
            total_reward += y_all_reward[run][episode]
            total_cum_reward += y_all_cum_reward[run][episode]
            total_timesteps += y_all_timesteps[run][episode]
        # The inner loop runs exactly number_runs times, so that is the divisor.
        avg_reward.append(total_reward / float(number_runs))
        avg_cum_reward.append(total_cum_reward / float(number_runs))
        avg_timesteps.append(total_timesteps / float(number_runs))
    return avg_reward, avg_cum_reward, avg_timesteps
a991f1f71ada7852a6ed94d7764d8112c6015cd1
18,213
def db36(s):
    """Convert a Redis base36 ID to an integer, stripping any type prefix
    (``t1_`` .. ``t5_``) present beforehand.

    BUG FIX: the prefix check used to be ``s[:3] in 't1_t2_t3_t4_t5_'``,
    which is a *substring* test — prefixes such as ``'1_t'`` or ``'_t2'``
    matched spuriously and were stripped. A tuple membership test matches
    only the five real prefixes.
    """
    if s[:3] in ('t1_', 't2_', 't3_', 't4_', 't5_'):
        s = s[3:]
    return int(s, 36)
0946bb125b17afec7803adf3654af7250047f9d6
18,215
def split_touched(files):
    """Split files that are touched vs files that are read.

    A file with a non-zero ``size`` counts as tracked; anything else is
    merely touched. Returns ``(tracked, touched)``.
    """
    tracked, touched = [], []
    for entry in files:
        bucket = tracked if entry.size else touched
        bucket.append(entry)
    return tracked, touched
574772b7cec285ca9463bbebeab90502933f35a4
18,218
def fromstringtolist(strlist, map_function=int):
    """Parse a list that was written as a string, e.g. ``"[1,2,3]"``.

    map_function: converts each comma-separated token to the element type.
    Returns None when the input is None or the string "None".
    """
    if strlist is None or strlist == "None":
        return None
    # Drop the surrounding brackets, then convert each token.
    inner = strlist[1:-1]
    return [map_function(token) for token in inner.split(",")]
23ec31783c66fdb05420a3e233102111a400a53f
18,219
def str_to_c_string(string):
    """Converts a Python bytes to a C++ string literal.

    >>> str_to_c_string(b'abc\x8c')
    '"abc\\\\x8c"'
    """
    # repr(b'...') gives e.g. b'abc\x8c'; drop the b prefix, then swap the
    # quote delimiters for C-style double quotes.
    literal = repr(string).removeprefix('b')
    return literal.replace("'", '"')
5316e61282d3ce3a807764588904529291453a37
18,222
def iuo(z1,z2):
    """
    intersection over union
    :param z1: polygon
    :param z2: polygon
    returns z1.intersection(z2) / z1.union(z2)
    """
    # NOTE(review): if z1/z2 are shapely geometries, the validity attribute
    # is `is_valid`, not `isvalid`, and `assert <attribute>` would then raise
    # AttributeError rather than check validity — confirm the polygon type.
    assert z1.isvalid
    assert z2.isvalid
    # NOTE(review): shapely geometries do not support `/`; a true IoU would
    # be intersection(...).area / union(...).area. Only valid as written if
    # the polygon type defines __truediv__ — verify against callers.
    return z1.intersection(z2) / z1.union(z2)
c7f682833a82c16fe0959fbed16980188b45dade
18,223
def round_and_clip_image(image):
    """
    Given a dictionary, ensure that the values in the 'pixels' list are all
    integers in the range [0, 255].

    All values should be converted to integers using Python's `round` function.

    Any locations with values higher than 255 in the input should have value
    255 in the output; and any locations with values lower than 0 in the input
    should have value 0 in the output.

    Returns a new image dict; the input is left unmodified.
    """
    result = image.copy()
    # BUG FIX: dict.copy() is shallow, so writing into result['pixels'] in
    # place mutated the caller's pixel list. Build a fresh list instead.
    result['pixels'] = [min(255, max(0, round(p))) for p in image['pixels']]
    return result
2c50a431be17fb203ffcc1136e91e2ffa97a7337
18,224
import requests


def getURLChain(targetURL):
    """For the given URL, return the chain of URLs following any redirects.

    :param targetURL: URL to probe with a HEAD request
    :return: tuple ``(ok, chain)`` where ``ok`` is True iff the final
        response was 200 OK, and ``chain`` lists the redirect URLs visited
        (empty on failure).
    """
    ok = False
    chain = []
    try:
        r = requests.head(targetURL, allow_redirects=True)
        ok = r.status_code == requests.codes.ok  # pylint: disable=no-member
        if ok:
            chain = [resp.url for resp in r.history]
    # RequestException is the base of ConnectionError, HTTPError,
    # URLRequired, TooManyRedirects and Timeout, so catching it alone covers
    # the whole tuple the previous version enumerated.
    except requests.exceptions.RequestException:
        ok = False
    return (ok, chain)
ecdfab9aff08035e8f830d67ba1380782ae82e6d
18,226
def dict_get_path(data, path, default=None):
    """
    Returns the value inside nested structure of data located
    at period delimited path

    When traversing a list, as long as that list is containing objects of
    type dict, items in that list will have their "name" and "type" values
    tested against the current key in the path.

    Args:
        data (dict or list): data to traverse
        path (str): '.' delimited string

    Kwargs:
        default: value to return if path does not exist
    """
    for key in path.split("."):
        # isinstance instead of `type(x) == list/dict` so subclasses
        # (OrderedDict, defaultdict, ...) traverse correctly.
        if isinstance(data, list):
            # Match a list element by its "name" (falling back to "type").
            for item in data:
                if item.get("name", item.get("type")) == key:
                    data = item
                    break
            else:
                return default
        elif isinstance(data, dict):
            if key not in data:
                return default
            data = data[key]
        else:
            # Scalar reached before the path was exhausted.
            return default
    return data
00cc29d35f23ebff77c8d66ac95c863b70240f17
18,229
def raises_keyerr(k, m):
    """ Determine whether a mapping is missing a particular key.

    This helper is useful for explicitly routing execution through
    __getitem__ rather than using the __contains__ implementation.

    :param object k: the key to check for status as missing
    :param Mapping m: the key-value collection to query
    :return bool: whether the requested key is missing from the given
        mapping, with "missing" determined by KeyError encounter during
        __getitem__
    """
    try:
        _ = m[k]
        return False
    except KeyError:
        return True
5244f18065f89c6a4c22e11a3b08b7e9628bd616
18,231
def FUNCTION_TO_REGRESS_TEMPLATE(x, a, b, c):
    """ A simple function to perform regression on.

    Args:
        x: A list of integers.
        a,b,c: Coefficients
    """
    linear_term = x[0] * a / 2
    power_term = x[1] ** b
    return linear_term + power_term + c
46f53a3d2a7e6729f51439a355991103dcbc03c2
18,236
def replace_substring(text: str, replace_mapping_dict: dict):
    """Apply every substitution in the mapping dict to the text.

    Args:
        text (str): text containing substrings to replace
        replace_mapping_dict (dict): maps each search string to its
            replacement

    Returns:
        str: text with all the substrings replaced
    """
    for old, new in replace_mapping_dict.items():
        text = text.replace(old, new)
    return text
12ca0addd4417813682100abe28462b88f58933e
18,237
def basefilename(context):
    """ Return the template filename minus the extension (everything up to
    the first '.'). """
    name, _, _ = context.template_name.partition('.')
    return name
f077ffd509bce97d989a9cf6f68e1a0f379c6bab
18,240
def is_core(protein, mutation, elaspic_core_data):
    """
    Given protein and mutation, check if that protein.mutation is in
    elaspic_core_data.

    Parameters
    ----------
    protein : <str>
        The UniProt/SWISSPROT protein name. E.g. "Q9UPU5".

    mutation : <str>
        The position of the mutation. E.g. "I342V".

    elaspic_core_data : <DataFrame>
        The ELASPIC results file that contains only the `core` type entries.

    Returns
    -------
    is_core_bool : <bool>
        A boolean value that indicates whether the given protein.mutation
        is an `core` type.
    """
    matches = elaspic_core_data[
        (elaspic_core_data["UniProt_ID"] == protein)
        & (elaspic_core_data["Mutation"] == mutation)
    ]
    return len(matches) > 0
51698e861f7c35e3a1d2e428ed71d178d470e3e1
18,243
def get_dropdown_value(soup, var_id):
    """Get the current value from a dropdown list.

    Use when you see a dropdown in this HTML format:
        <select id="var-id" name="var-name">
            <option value="false"> -- Option#1 -- </option>
            ...
            <option value="true" selected="selected"> -- Option#2 -- </option>
        </select>

    Args:
        soup (soup): soup pagetext that will be searched.
        var_id (string): the id of a var, used to find its value.

    Returns:
        (string): The text of the currently selected dropdown option
        (falling back to the first option when none is marked selected).
    """
    try:
        dropdown = soup.find("select", {"id": var_id})
        # BUG FIX: the previous code returned the *first* <option>
        # unconditionally, not the currently selected one the docstring
        # promises. Prefer the option carrying a `selected` attribute.
        option = dropdown.find("option", selected=True)
        if option is None:
            option = dropdown.find("option")
        return option.text
    except AttributeError:
        print('\nERROR: <' + var_id + '> not found!\nPagesoup:\n\n', soup)
        raise LookupError
d26ca8233b0cabb174a202800b063db810b902a1
18,244
def rgb2hex(r, g, b, normalized=False):
    """ Converts RGB to HEX color.

    :param r, g, b: channel values — 0..255 ints, or 0..1 floats when
        ``normalized`` is True
    :param normalized: set True if the triplet is normalized to unity
    :return: '#RRGGBB' string
    """
    # Check if RGB triplett is normalized to unity
    if normalized:
        r, g, b = r * 255.0, g * 255.0, b * 255.0
    # BUG FIX: '%02X' raises TypeError for floats in Python 3, so the
    # normalized path (and any float input) crashed. Round to ints first.
    return '#%02X%02X%02X' % (int(round(r)), int(round(g)), int(round(b)))
f05fd63b90ee946e011565194f93f4015a8e2cf1
18,245
def ringfilter(motif, bond, pivot1, pivot2):
    """Test whether a linear sequence of atoms closes into a ring, i.e.
    whether the atoms at positions pivot1 and pivot2 of *motif* are bonded.

    Returns 1 if bonded, 0 otherwise.
    """
    atom_a = motif[pivot1]
    atom_b = motif[pivot2]
    return 1 if bond[atom_a][atom_b] > 0 else 0
2fea90a0965d8a1504f5b82610a90ab8611287b9
18,246
def FindNonTrivialOrbit(generators):
    """
    Given a generating set <generators> (a Python list containing
    permutations), return an element <el> with a nontrivial orbit in the
    group generated by <generators>, or <None> if no such element exists.
    (Useful for order computation / membership testing.)
    """
    if generators == []:
        return None
    degree = generators[0].n
    # The first point moved by any generator has a nontrivial orbit.
    for perm in generators:
        for point in range(degree):
            if perm[point] != point:
                return point
    return None
ddd3f92523a0b0f8a7cb08146d4d1014a5adedf1
18,249
def city_functions(city, country, population=''):
    """Generate a neatly formatted city string, optionally with population."""
    base = f"{city.title()}, {country.title()}"
    if population:
        return f"{base} - population {population}."
    return f"{base}."
1fde2b9bd910e02c67cf7a5c46c13b88d381ce03
18,253
import torch


def smooth_dice_loss(pred: torch.Tensor, target: torch.Tensor,
                     smooth: float = 1., eps: float = 1e-6) -> torch.Tensor:
    """ Smoothed dice loss.

    :param pred: (torch.Tensor) predictions, logits
    :param target: (torch.Tensor) target, logits or binary
    :param smooth: (float) smoothing value
    :param eps: (eps) epsilon for numerical stability

    :returns dice_loss: (torch.Tensor) the dice loss
    """
    probs = torch.sigmoid(pred)
    # Binarize the target (anything positive counts as foreground).
    mask = (target > 0).float()
    overlap = (probs.reshape(-1) * mask.reshape(-1)).sum()
    denom = probs.sum() + mask.sum() + smooth + eps
    return 1 - (2. * overlap + smooth) / denom
30cda9c2789661d997254e43febb4412ecc76b15
18,256
def get_seat_id(ticket: str) -> int:
    """Get seat id based on boarding ticket (ie. 'BBFFBBFRLL').

    Each F/B letter halves the row interval [0, 128) and each L/R letter
    halves the column interval [0, 8); the seat id is row * 8 + column.
    """
    row_lo, row_hi = 0, 128
    col_lo, col_hi = 0, 8
    for letter in ticket:
        if letter in "FB":
            mid = (row_lo + row_hi) // 2
            if letter == "F":
                row_hi = mid
            else:
                row_lo = mid
        else:
            mid = (col_lo + col_hi) // 2
            if letter == "L":
                col_hi = mid
            else:
                col_lo = mid
    return row_lo * 8 + col_lo
5f901755192c93dc275afa5392c2472db7e60108
18,262
def _create_object_from_type_and_dict(cls, obj_dict): """Creates an object, bypassing the constructor. Creates an object of type `cls`, whose `__dict__` is updated to contain `obj_dict`. Args: cls: The type of the new object. obj_dict: A `Mapping` that should be used to initialize the new object's `__dict__`. Returns: An object of type `cls`. """ value = object.__new__(cls) value.__dict__.update(obj_dict) return value
540c11639f724aeacc745cc7b334abb556783eee
18,263
import copy


def populate_target_env_cfg(target_cfg, target_env):
    """
    Read out context from target config then merge it with global magic
    context.

    All keys in target config that start with `_` are considered magic
    context and are merged into the selected target_env config, without
    overriding keys the env config already defines.
    """
    # Deepcopy because yaml extend is not a deep copy and we inject new
    # keys below.
    merged = copy.deepcopy(target_cfg[target_env])
    for key, value in target_cfg.items():
        if key.startswith('_') and key not in merged:
            merged[key] = value
    return merged
b252b87faa50a949b3c1a51ed8da5dac8256a2ac
18,264
def get_option_value(option):
    """
    An option in a Checkboxes or CheckboxTree question is a dict, but we
    need to treat their contents in consistent ways, e.g. when getting the
    value to be persisted in the API.

    :param option: dict from a Question's list of options
    :return: string value to be persisted
    """
    value = option.get('value')
    if value:
        return value
    # Fall back to the label when 'value' is absent or falsy.
    return option['label']
98e9119c77a1fbc05f8e988bc7cf9a0e3ef9385e
18,270
def bytes2str(data):
    """
    Convert bytes to string

    >>> bytes2str(b'Pwning')
    'Pwning'
    >>>
    """
    # Each byte value maps to the code point of the same ordinal.
    return "".join(chr(byte) for byte in data)
cd2e7fd59628c7b4eb8fdc918148f960f1226d6f
18,271
from io import StringIO


def svg(plot, close=True):
    """ Render the plot as SVG markup and (optionally) close it.

    Args:
        plot: matplotlib.pyplot
            Plot from which the SVG should be made.
        close: bool
            Whether to close the plot afterwards.

    Returns:
        str SVG code.
    """
    buffer = StringIO()
    plot.savefig(buffer, format='svg')
    markup = buffer.getvalue()
    buffer.close()

    if close:
        plot.close()

    return markup
4acdd6f346af2de672e538415795e0e1181ee4e4
18,272
def mocked_events_ics(source: str = "uqcs") -> bytes:
    """
    Returns a locally stored .ics file that imitates the UQCS Calendar on
    Google Calendar.
    """
    fixture_path = "test/test_events_events.ics"
    with open(fixture_path, "rb") as events_file:
        data = events_file.read()
    return data
8388d826648c9b274c991453eaf9e3885794e980
18,275
def _dictify(value): """ Converts non-dictionary value to a dictionary with a single empty-string key mapping to the given value. Or returns the value itself if it's already a dictionary. This is useful to map values to row's columns. """ return value if isinstance(value, dict) else {'': value}
241ead832384a1459666f70829634aa8f3fdc2ae
18,279
import random def random_row(categories=None): """ Make a random row Returns rows like (category, a, b): - category is a categorical variable, - a is a randomly selected cluster value (with two clusters) - b is a uniform integer on 0, 1 Returns: (category, a, b) - the random row """ # Category categories = categories or 'xyz' category = random.choice(categories) # Make a from clusters a_mean = random.choice([25, 75]) a_stddev = 20 a = random.normalvariate(a_mean, a_stddev) # b is just random uniform b = random.uniform(0, 1) # Do other values return (category, a, b)
dc4db2db758454898a92a7953cb0f186e03a5ba8
18,281
import re


def delete_links(string):
    """Delete links from the input string.

    Args:
        string (str): string to delete links from

    Returns:
        str: string without links
    """
    # Everything from 'http' up to the next whitespace counts as a link.
    link_pattern = re.compile(r'http\S+')
    return link_pattern.sub('', string)
c452e25f245b7b791800cff8ca31ce75598e06c9
18,291
def one_text_header(response, header_name):
    """
    Retrieve one text header from the given HTTP response.

    Takes the first raw value for *header_name* and decodes it as UTF-8.
    """
    raw_values = response.headers.getRawHeaders(header_name)
    return raw_values[0].decode("utf-8")
afc4b3bba143a092173a935ede77ff8ecec6d7e3
18,303