content: string, 39 to 14.9k chars
sha1: string, 40 chars
id: int64, 0 to 710k
def normalize_time(timestamp):
    """Normalize time in arbitrary timezone to UTC"""
    offset = timestamp.utcoffset()
    return timestamp.replace(tzinfo=None) - offset if offset else timestamp
bd8b7f0417afabe8c58eba3393e7d466c35a70be
24,875
def round_up(n, size):
    """
    Round an integer up to the next multiple of size. Size must be a
    power of 2.
    """
    assert size & (size - 1) == 0, "size is not power of 2"
    return ((n - 1) | (size - 1)) + 1
02f34fd5f2c059a9ee1b657f099b4699d90dfc01
24,878
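A quick usage sketch of the bit trick above (not part of the dataset record; values are illustrative):

# ((n - 1) | (size - 1)) + 1 sets every bit below `size`, then the +1
# carries over to the next multiple of size.
assert round_up(13, 8) == 16   # next multiple of 8 at or above 13
assert round_up(16, 8) == 16   # already a multiple, returned unchanged
assert round_up(1, 4) == 4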
def recombination(temperature):
    """
    Calculates the helium singlet and triplet recombination rates for a gas
    at a certain temperature.

    Parameters
    ----------
    temperature (``float``):
        Isothermal temperature of the upper atmosphere in unit of Kelvin.

    Returns
    -------
    alpha_rec_1 (``float``):
        Recombination rate of helium singlet in units of cm ** 3 / s.

    alpha_rec_3 (``float``):
        Recombination rate of helium triplet in units of cm ** 3 / s.
    """
    # The recombination rates come from Benjamin et al. (1999,
    # ADS:1999ApJ...514..307B)
    alpha_rec_1 = 1.54E-13 * (temperature / 1E4) ** (-0.486)
    alpha_rec_3 = 2.10E-13 * (temperature / 1E4) ** (-0.778)
    return alpha_rec_1, alpha_rec_3
c09e887053794a4c00b0daa3a48a57e563d89992
24,879
import torch


def batch_quadratic_form(x: torch.Tensor, A: torch.Tensor) -> torch.Tensor:
    """
    Compute the quadratic form x^T * A * x for a batched input x.
    Inspired by https://stackoverflow.com/questions/18541851/calculate-vt-a-v-for-a-matrix-of-vectors-v
    This is a vectorized implementation of out[i] = x[i].t() @ A @ x[i]
    x shape: (B, N)
    A shape: (N, N)
    output shape: (B,)
    """
    return (torch.matmul(x, A) * x).sum(1)
4e639fc210e944cdc6726c2daab85e486de58134
24,880
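A usage sketch for batch_quadratic_form (not part of the dataset record; the shapes are illustrative):

import torch

x = torch.randn(32, 8)            # batch of 32 vectors of length 8
A = torch.randn(8, 8)             # single matrix shared across the batch
out = batch_quadratic_form(x, A)  # shape (32,)
# Equivalent (slower) per-row loop for comparison:
assert torch.allclose(out, torch.stack([v @ A @ v for v in x]), atol=1e-5)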
def _codepoint_is_ascii(ch):
    """
    Returns true if a codepoint is in the ASCII range
    """
    return ch < 128
931a3a67956bffd28f73e938e5d312951a2b3b80
24,881
import re


def valid_email(email):
    """Check if entered email address is valid

    This checks if the email address contains a "@" followed by a "."

    Args:
        email (str): input email address

    Returns:
        bool: True if the input is a valid email and False otherwise
    """
    # Ensure email is a string
    if not isinstance(email, str):
        return False
    # Find @ and . in the email address; the dot must be escaped,
    # otherwise it matches any character
    return bool(re.match(r"[^@]+@[^@]+\.[^@]+", email))
a675856a7bb8dae87a77990c9e752b0bb4177c9b
24,882
def get_iou(gt_bbx, pred_bbx):
    """
    Calculate the Intersection over Union (IoU) of two bounding boxes.

    Based on:
    https://stackoverflow.com/questions/25349178/
    calculating-percentage-of-bounding-box-overlap-for-image-detector-evaluation

    Parameters
    ----------
    gt_bbx : dict
        Keys: {'xmin', 'xmax', 'ymin', 'ymax'}
        The (xmin, ymin) position is at the top left corner,
        the (xmax, ymax) position is at the bottom right corner
    pred_bbx : dict
        Keys: {'xmin', 'xmax', 'ymin', 'ymax'}
        The (xmin, ymin) position is at the top left corner,
        the (xmax, ymax) position is at the bottom right corner

    Returns
    -------
    (float in [0, 1], float)
        The IoU and the intersection area.
    """
    assert gt_bbx['xmin'] < gt_bbx['xmax']
    assert gt_bbx['ymin'] < gt_bbx['ymax']
    assert pred_bbx['xmin'] < pred_bbx['xmax']
    assert pred_bbx['ymin'] < pred_bbx['ymax']

    # determine the coordinates of the intersection rectangle
    x_left = max(gt_bbx['xmin'], pred_bbx['xmin'])
    y_top = max(gt_bbx['ymin'], pred_bbx['ymin'])
    x_right = min(gt_bbx['xmax'], pred_bbx['xmax'])
    y_bottom = min(gt_bbx['ymax'], pred_bbx['ymax'])

    if (x_right < x_left) or (y_bottom < y_top):
        # The boxes do not overlap
        return 0.0, 0.0

    # The intersection of two axis-aligned bounding boxes is always an
    # axis-aligned bounding box
    intersection_area = (x_right - x_left) * (y_bottom - y_top)

    # compute the area of both BBs
    gt_bbx_area = (gt_bbx['xmax'] - gt_bbx['xmin']) * \
        (gt_bbx['ymax'] - gt_bbx['ymin'])
    pred_bbx_area = (pred_bbx['xmax'] - pred_bbx['xmin']) * \
        (pred_bbx['ymax'] - pred_bbx['ymin'])

    # compute the intersection over union by taking the intersection
    # area and dividing it by the sum of prediction + ground-truth
    # areas - the intersection area
    iou = intersection_area / \
        float(gt_bbx_area + pred_bbx_area - intersection_area)
    assert 0.0 <= iou <= 1.0
    return iou, intersection_area
20ce6ed931e1a26ed078113191c6de06c4030510
24,884
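A usage sketch for get_iou (not part of the dataset record; the boxes are made up for illustration):

gt = {'xmin': 0, 'ymin': 0, 'xmax': 10, 'ymax': 10}
pred = {'xmin': 5, 'ymin': 5, 'xmax': 15, 'ymax': 15}
iou, inter = get_iou(gt, pred)
# intersection is the 5x5 square; union is 100 + 100 - 25 = 175
assert inter == 25
assert abs(iou - 25 / 175) < 1e-9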
def directory_get_basename(path: str) -> str:
    """Returns the last directory in a path."""
    p = path
    if p.endswith('/'):
        p = p[:-1]
    elif '.' in p:
        p = p[:p.rfind('/')]
    return p[p.rfind('/') + 1:]
69af9336142c88cd705162b2e4522aef2ac95403
24,889
def _return_quantity(quantity, return_quantity, units_out=''):
    """Helper method to return appropriate unit type

    Parameters
    ----------
    quantity : :class:`~vunits.quantity.Quantity` obj
        Quantity object to use
    return_quantity : bool
        If True, returns :class:`~vunits.quantity.Quantity` obj.
        Otherwise, return ``quantity.mag``
    units_out : str, optional
        Units to return. Not required if ``return_quantity`` is True.

    Returns
    -------
    out : :class:`~vunits.quantity.Quantity` obj or float
        Value to return based on ``return_quantity``.
    """
    if return_quantity:
        return quantity
    else:
        return quantity(units_out)
de68065aad70134e4e7778e099711013a9930e13
24,890
def _get_currency_pair(currency, native):
    """
    Format a crypto currency with a native one for the Coinbase API.
    """
    return '{}-{}'.format(currency, native)
92c3f43d1661f912a6bb63d14df1a7095eb784f3
24,891
import six


def sanitize_command_output(content):
    """Sanitizes the output obtained from underlying instances.

    Sanitizes the output by only returning unicode characters; any other
    characters will be ignored. Also strips unrequired spaces and newlines
    from the content.
    """
    return six.text_type(content, errors='ignore').strip()
55a42507b24d2fcb3993cfa8abda0cd04e4c7718
24,892
def countmatch(str1, str2, countstr):
    """Checks whether countstr occurs the same number of times in str1
    and str2."""
    return str1.count(countstr) == str2.count(countstr)
a25f77cc6b847ff6f9b81af33836b3e4a715b056
24,894
def get_histogram_limits(filename):
    """Read a histogram file `filename' and return the smallest and
    largest values in the first column."""
    hmin = 0
    hmax = 0
    with open(filename, "r") as f:
        line = f.readline().split("\t")
        hmin = float(line[0])
        for line in f:
            line = line.split("\t")
            hmax = line[0]
        hmax = float(hmax)
    return (hmin, hmax)
8ceed4f939c40f0266afa9f3218073842578f26f
24,897
from datetime import datetime


def str2timestamp(s, fmt='%Y-%m-%d-%H-%M'):
    """Converts a string into a unix timestamp."""
    dt = datetime.strptime(s, fmt)
    epoch = datetime.utcfromtimestamp(0)
    return (dt - epoch).total_seconds()
69dd680623da8a61837676b540e1d5c053c8e198
24,899
def is_cached(o, name):
    """Whether a cached property is already computed.

    Parameters
    ----------
    o : object
        The object the property belongs to.
    name : str
        Name of the property.

    Returns
    -------
    bool
        True iff the property is already computed.

    Examples
    --------
    >>> class MyClass(object):
    ...     @cached_property
    ...     def my_cached_property(self):
    ...         print('Computing my_cached_property...')
    ...         return 42
    ...     @cached_property
    ...     def my_second_cached_property(self):
    ...         print('Computing my_second_cached_property...')
    ...         return 51
    >>> my_object = MyClass()
    >>> my_object.my_cached_property
    Computing my_cached_property...
    42
    >>> is_cached(my_object, 'my_cached_property')
    True
    >>> is_cached(my_object, 'my_second_cached_property')
    False
    """
    return name in o.__dict__
eb7b1356ded56dddb4cd917b27461e9108bd7b76
24,900
def minutes_to_human_duration(minutes_duration):
    """
    Convert a duration in minutes into a human-readable duration string.
    """
    try:
        hours, minutes = divmod(minutes_duration, 60)
        return "%sh %smin" % (hours, minutes)
    except TypeError:
        return None
22197c568505e366d5d4f6b020f8b61466deb43a
24,907
def chart_filter(df, year=None, month=None, neighbourhood=None, crime=None):
    """
    Filters the given database to wrangle it into the dataframe required
    for the graphs to display relevant information. The default value of
    None allows the maps to display every single data point; giving
    specific information will filter the database.

    Parameters
    ----------
    df : Pandas Data Frame
        Dataframe of crime data
    year : int or list
        year or years of crime committed to be displayed in the graphs
    month : int or list
        month or months of crime committed to be displayed in the graphs
    neighbourhood : string or list
        neighbourhood or neighbourhoods of where crime occurs
    crime : string or list
        crime or crimes committed to be displayed

    Returns
    -------
    Pandas Data Frame
        A filtered data frame of relevant information
    """
    filtered_df = df
    if year is not None:
        if type(year) == list:
            year_list = list(range(year[0], year[1] + 1))
            filtered_df = filtered_df.query('YEAR == %s' % year_list)
        else:
            filtered_df = filtered_df.query('YEAR == %s' % year)
    if month is not None:
        if type(month) == list:
            month_list = list(range(month[0], month[1] + 1))
            filtered_df = filtered_df.query('MONTH == %s' % month_list)
        else:
            filtered_df = filtered_df.query('MONTH == %s' % month)
    if neighbourhood is not None:
        if neighbourhood == []:
            neighbourhood = None
        elif type(neighbourhood) == list:
            filtered_df = filtered_df.query('DISTRICT == %s' % neighbourhood)
        else:
            filtered_df = filtered_df.query('DISTRICT == "%s"' % neighbourhood)
    if crime is not None:
        if crime == []:
            crime = None
        elif type(crime) == list:
            filtered_df = filtered_df.query('OFFENSE_CODE_GROUP == %s' % crime)
        else:
            filtered_df = filtered_df.query('OFFENSE_CODE_GROUP == "%s"' % crime)
    return filtered_df
0958af7abd9d302adc2aff91b081d309a88a505e
24,908
def fibonacciAtIndex(index):
    """
    Returns the fibonacci number at a given index.
    The algorithm uses 3 variables instead of recursion in order to
    reduce memory usage and runtime.
    """
    a1, a2 = 1, 1
    while index > 2:
        temp = a1
        a1 = a2
        a2 += temp
        index -= 1
    return a2
2dde77a70bfe8d1c32777910521a9b4695ab326e
24,910
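A usage sketch for fibonacciAtIndex (not part of the dataset record); the indexing is 1-based, with indices 1 and 2 both returning 1:

assert [fibonacciAtIndex(i) for i in range(1, 8)] == [1, 1, 2, 3, 5, 8, 13]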
from typing import Tuple
from typing import List


def define_2d_correlations_hists() -> Tuple[List[str], List[float], List[Tuple[str, float, float, str]]]:
    """ Define the 2D correlation hists.

    Args:
        None.
    Returns:
        (ep_orientation values, assoc_pt values, (ep_orientation,
        lower_pt_bin_edge, upper_pt_bin_edge, name))
    """
    ep_orientations = ["all", "in", "mid", "out"]
    assoc_pt = [1.0, 1.5, 2.0, 3.0, 4.0, 5.0, 6.0, 10.0]
    # Example name: "raw2DratioallAss1015J2040C3050bg0812sig06rbX2rbY2.root"
    names = [
        (ep, pt_1, pt_2, f"raw2Dratio{ep}Ass{int(pt_1 * 10)}{int(pt_2 * 10)}J2040C3050bg0812sig06rbX2rbY2")
        for ep in ep_orientations
        for pt_1, pt_2 in zip(assoc_pt[:-1], assoc_pt[1:])
    ]
    return ep_orientations, assoc_pt, names
00a0f59fe7c238a271fa75a83085556fc51f4acc
24,913
def _ParseClassNode(class_node):
    """Parses a <class> node from the dexdump xml output.

    Returns:
        A dict in the format:
        {
            'methods': [<method_1>, <method_2>],
            'superclass': <name of the class this class extends>
        }
    """
    methods = []
    for child in class_node:
        if child.tag == 'method':
            methods.append(child.attrib['name'])
    return {'methods': methods, 'superclass': class_node.attrib['extends']}
2d801173230a065e668b89cac155b31991cee656
24,921
import select


def _is_readable(socket):
    """Return True if there is data to be read on the socket."""
    timeout = 0
    (rlist, wlist, elist) = select.select(
        [socket.fileno()], [], [], timeout)
    return bool(rlist)
79b258987171e3a5e4d3bab51a9b8c9a59e2415e
24,922
def export_sheet(ss, sheet_id, export_format, export_path, sheet_name):
    """
    Exports a sheet, given export filetype and location.
    Allows export format 'csv', 'pdf', or 'xlsx'.

    :param ss: initialized smartsheet client instance
    :param sheet_id: int, required; sheet id
    :param export_format: str, required; 'csv', 'pdf', or 'xlsx'
    :param export_path: str, required; filepath to export sheet to
    :param sheet_name: str, required; name of sheet exported
    :return: str, indicating failure or success, with path, filename, extension
    """
    if export_format == 'csv':
        ss.Sheets.get_sheet_as_csv(sheet_id, export_path)
    elif export_format == 'xlsx':
        ss.Sheets.get_sheet_as_excel(sheet_id, export_path)
    elif export_format == 'pdf':
        # there is an optional paperSize parameter; default is A1
        ss.Sheets.get_sheet_as_pdf(sheet_id, export_path)
    if export_format in ('csv', 'xlsx', 'pdf'):
        return 'Sheet exported to {}{}.{}'.format(export_path, sheet_name, export_format)
    else:
        return 'export_format \'{}\' is not valid. Must be \'csv\', \'pdf\', or \'xlsx\''.format(export_format)
76b49fa0904140571eb84526f6021448db54dea9
24,924
def adapt_sample_keys(sample_list, key_type):
    """
    Converts sample_list to a new format where instead of "scene_id",
    "object_id" and "ann_id" there is a "sample_id".

    :param key_type:
        'kkk' for {scene_id}-{object_id}_{ann_id}
        'kk' for {scene_id}-{object_id}
        'k' for {scene_id}
    :return: new sample list.
    """
    assert key_type in ['kkk', 'kk', 'k']
    up_sl = []
    for item in sample_list:
        if key_type == 'kkk':
            key_format = '{}-{}_{}'
            item['sample_id'] = key_format.format(item['scene_id'], item['object_id'], item['ann_id'])
            up_sl.append(item)
        elif key_type == 'kk':
            key_format = '{}-{}'
            item['sample_id'] = key_format.format(item['scene_id'], item['object_id'])
            up_sl.append(item)
        elif key_type == 'k':
            key_format = '{}'
            item['sample_id'] = key_format.format(item['scene_id'])
            up_sl.append(item)
    return up_sl
8845da67ee9627cf377efc1c7b789eaa4cfb2c65
24,930
import types
import inspect


def get_pytorch_model(module, model_settings):
    """ Define a DeepSphere-Weather model based on model_settings configs.
    The architecture structure must be defined in the 'module' custom python file.

    Parameters
    ----------
    module : module
        Imported python module containing the architecture definition.
    model_settings : dict
        Dictionary containing all architecture options.
    """
    if not isinstance(module, types.ModuleType):
        raise TypeError("'module' must be a preimported module with the architecture definition.")
    # - Retrieve the required model arguments
    DeepSphereModelClass = getattr(module, model_settings['architecture_name'])
    fun_args = inspect.getfullargspec(DeepSphereModelClass.__init__).args
    model_args = {k: model_settings[k] for k in model_settings.keys() if k in fun_args}
    # - Define DeepSphere model
    model = DeepSphereModelClass(**model_args)
    return model
a32a01163b71b1610c97adad1b3febfa1d4d5a25
24,932
import torch


def get_criterion(config):
    """
    Creates the torch criterion for optimization
    """
    if config["name"] == "cross_entropy":
        return torch.nn.CrossEntropyLoss()
    else:
        raise NotImplementedError
2d1a9cc5983ab64fe2016637c292de3b9551079b
24,939
def is_magic(attribute):
    """Check if attribute name matches magic attribute convention."""
    return all([
        attribute.startswith('__'),
        attribute.endswith('__')])
d392a34902ce22127d92bc4d678228d59b97cba9
24,944
def expand_row(header, row):
    """Parse information in row to dict.

    Args:
        header (dict): key/index header dict
        row (List[str]): sambamba BED row

    Returns:
        dict: parsed sambamba output row
    """
    thresholds = {threshold: float(row[key])
                  for threshold, key in header['thresholds'].items()}
    data = {
        'chrom': row[0],
        'chromStart': int(row[1]),
        'chromEnd': int(row[2]),
        'sampleName': row[header['sampleName']],
        'readCount': int(row[header['readCount']]),
        'meanCoverage': float(row[header['meanCoverage']]),
        'thresholds': thresholds,
        'extraFields': row[header['extraFields']]
    }
    return data
79ea16b498f6fd5c7c002bf822a34da65de0d2c9
24,945
import pickle


def retrieve_model(filename='model.pkl'):
    """
    Retrieve probability model pickled into a file.
    """
    with open(filename, 'rb') as modfile:
        return pickle.load(modfile)
3e05c3c9b3bc2bc3974dab422ced5f53589c2823
24,949
def aggregator(df, column):
    """
    Return multiple aggregate data values, compiled into a list:
    summ (total), minn (lowest value), maxx (highest value), avg (mean),
    med (median), mode (most repeated value).

    Parameters
    ----------
    df : pandas object
        dataFrame from which to pull the values
    column : string
        column to focus on for creating the aggregate data

    Returns
    -------
    list of floats
        [summ, minn, maxx, avg, med, mode]

    Usage / Example
    ---------------
    ps4_sum, _, ps4_max, ps4_avg, _, ps4_mode = aggregator(df_ps4, "Global_Sales")

    NOTE: values you don't want saved to an object, like min/med in the
    example above, can be ignored by using the underscore character instead
    of an object name.
    """
    summ = round(df[column].sum(), 3)    # Total sales for games on this system
    minn = round(df[column].min(), 3)    # Lowest sales for a game on this system
    maxx = round(df[column].max(), 3)    # Highest sales for a game on this system
    avg = round(df[column].mean(), 3)    # Average sales for games on this system
    med = round(df[column].median(), 3)  # Median sales for games on this system
    mode = round(df[column].mode(), 3)   # Most repeated value for game sales on this system
    return [summ, minn, maxx, avg, med, mode]
f940270662715859f42e6b9ffc4411c492085651
24,951
def gen_compare_cmd(single_tree, bootstrapped_trees):
    """
    Returns a command list for Morgan Price's "CompareToBootstrap" perl script <list>

    Input:
        single_tree <str>        -- path to reference tree file (Newick)
        bootstrapped_trees <str> -- path to bootstrapped trees file (Newick)
    """
    cmp_prog_path = '/home/alexh/bin/MOTreeComparison/CompareToBootstrap.pl'
    compare_cmd = ['perl', cmp_prog_path,
                   '-tree', single_tree,
                   '-boot', bootstrapped_trees]
    return compare_cmd
df9d2d4c21ca28012107b8af69706d45804e5637
24,953
def silent_none(value):
    """
    Return `None` values as empty strings
    """
    if value is None:
        return ''
    return value
48b9709dc4bffc659b168f625f4f6be5608a645d
24,955
def splinter_screenshot_encoding(request):
    """Browser screenshot html encoding."""
    return "utf-8"
17e6055332e7bc63778e1e80ba01c1631b0c876e
24,960
import math


def rms_mean(elements):
    """
    Calculate the root mean square (RMS) of a list of elements.

    :param elements: a list of elements
    :return: root mean square value
    """
    return math.sqrt(sum(x * x for x in elements) / len(elements))
a1178e70f210063c6559fa15789bfd15c1f89b79
24,961
from typing import Any


def maybebool(value: Any) -> bool:
    """
    A "maybified" version of the bool() function.
    """
    return bool(value)
e40d112291ce7bfb58d94208be662cb5370506d9
24,964
import random


def digits() -> str:
    """Generate a random 4 digit number."""
    return str(random.randint(1111, 9999))
8d4f9195e74c2b1b0c31a108a502a028239bea8e
24,965
from typing import Sequence


def crop_to_bbox_no_channels(image, bbox: Sequence[Sequence[int]]):
    """
    Crops image to bounding box (in spatial dimensions)

    Args:
        image (arraylike): 2d or 3d array
        bbox (Sequence[Sequence[int]]): bounding box coordinates in an
            interleaved fashion (e.g. (x1, x2), (y1, y2), (z1, z2))

    Returns:
        arraylike: cropped array
    """
    resizer = tuple([slice(_dim[0], _dim[1]) for _dim in bbox])
    return image[resizer]
0d6d4a2c77be0343b7557485e06fd8c33a49e72f
24,969
def mean_std(feature_space, eps=1e-5):
    """
    Calculates the mean and standard deviation for each channel

    Arguments:
        feature_space (torch.Tensor): Feature space of shape (N, C, H, W)
    """
    # eps is a small value added to the variance to avoid divide-by-zero.
    size = feature_space.size()
    assert (len(size) == 4), "Feature space shape is NOT structured as N, C, H, W!"
    N, C = size[:2]
    feat_var = feature_space.view(N, C, -1).var(dim=2) + eps
    feature_std = feat_var.sqrt().view(N, C, 1, 1)
    feature_mean = feature_space.view(N, C, -1).mean(dim=2).view(N, C, 1, 1)
    return feature_mean, feature_std
2b228edab0397257b1deacdecae95265d955c76c
24,976
import torch


def bbox_transform(boxes, gtboxes):
    """
    Bounding Box Transform from groundtruth boxes and proposal boxes to deltas

    Args:
        boxes: [N, 4] torch.Tensor (xywh) -- proposal/anchor boxes
        gtboxes: [N, 4] torch.Tensor (xyxy) -- groundtruth boxes
    Return:
        delta: [N, 4] torch.Tensor
    """
    gt_w = gtboxes[:, 2] - gtboxes[:, 0] + 1
    gt_h = gtboxes[:, 3] - gtboxes[:, 1] + 1
    # center
    gt_x = gtboxes[:, 0] + 0.5 * gt_w
    gt_y = gtboxes[:, 1] + 0.5 * gt_h
    # Anchors [x, y, w, h]
    anchor_x = boxes[:, 0]
    anchor_y = boxes[:, 1]
    anchor_w = boxes[:, 2]
    anchor_h = boxes[:, 3]
    delta_x = (gt_x - anchor_x) / anchor_w
    delta_y = (gt_y - anchor_y) / anchor_h
    delta_w = torch.log(gt_w / anchor_w)
    delta_h = torch.log(gt_h / anchor_h)
    # [N, 4]
    return torch.stack([delta_x, delta_y, delta_w, delta_h]).transpose(0, 1)
fa2bf83d24206b83508612ac728636905e80ebcc
24,981
from typing import Union
from pathlib import Path
import pickle


def load_picke(ffp: Union[Path, str]):
    """Loads the pickle file"""
    with open(ffp, "rb") as f:
        return pickle.load(f)
39145f2c1dd51226f19b89aaeb984a9434ebb06c
24,985
from typing import List


def replace_words_with_prefix_hash(dictionary: List[str], sentence: str) -> str:
    """
    Replace words in a sentence with those in the dictionary based on the prefix match.

    Intuition:
        For each word in the sentence, we'll look at successive prefixes and
        see if we saw them before.

    Algorithm:
        Store all roots in a set. Then for each word, look at successive
        prefixes for that word. If we find a prefix that is a root, replace
        the word with that prefix. Otherwise, the prefix will just be the
        word itself, and we should add that to the final sentence.

    Complexity Analysis:
        - Time Complexity: O(sum(w_i^2)) where w_i is the length of the i-th
          word. We might check every prefix, and checking all prefixes of the
          i-th word is O(w_i^2) work.
        - Space Complexity: O(N) where N is the length of our sentence; the
          space used by root_set.

    @param dictionary: List of roots
    @param sentence: words separated by space to perform replacement on
    @return: New sentence with replaced words
    """
    root_set = set(dictionary)

    def replace(word: str) -> str:
        for x in range(1, len(word)):
            if word[:x] in root_set:
                return word[:x]
        return word

    return " ".join(map(replace, sentence.split()))
bb46b0dc61eab2be358d44f8fb46782f867e3c30
24,990
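A usage sketch mirroring the "replace words" setup the docstring describes (not part of the dataset record; the inputs are illustrative):

roots = ["cat", "bat", "rat"]
sentence = "the cattle was rattled by the battery"
assert replace_words_with_prefix_hash(roots, sentence) == \
    "the cat was rat by the bat"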
def register_endpoints(app):
    """Register endpoints defined in openapi.yml

    Additionally register any flask endpoints / blueprints

    Parameters
    ----------
    app : Connexion
        A Connexion application instance.
    """
    app.add_api('openapi.yml', strict_validation=True)

    # Add health check
    @app.app.route('/health/pricing')
    def health_check():
        """Pingable endpoint used to determine whether the service is running"""
        return '', 200
5c7990be3bde5c7d99cbac1e34a98aa6df5761e3
24,991
def is_project_admin_context(context):
    """Check if this request has admin context within the project."""
    if context.is_admin:
        return True
    return False
a2365f7a0d830cdbb3ca76347b5d152d42ce178e
24,992
def get_last_timestamp(db):
    """Return the last update timestamp from the database"""
    return db.execute('SELECT update_time FROM meta_slurm_lastupdate').fetchone()[0]
bce062d16b00e46c9ff5943e99329885fd1a0dc9
25,000
def line_parameter_form_from_points(a):
    """
    Parameters of line in parameter form: x = v + t * u, determined from
    points in a. Only the first and last element of a are used.
    """
    u = a[-1] - a[0]
    v = a[0]
    return u, v
d8f6abcbacc9bca3feea8f556630a4499512b8b1
25,002
def calc_db_cost_v3(db) -> float:
    """Returns a noise cost for given dB: every 10 dB increase doubles the
    cost (dB >= 45 & dB <= 75).
    """
    if db <= 44:
        return 0.0
    db_cost = pow(10, (0.3 * db) / 10)
    return round(db_cost / 100, 3)
bad2506da5cb53aa552294498122e70dd95f780f
25,005
def read_chrom_sizes(chrom_sizes):
    """Read chrom.sizes file."""
    ret = {}
    with open(chrom_sizes, "r") as f:
        for line in f:
            ld = line.rstrip().split("\t")
            ret[ld[0]] = int(ld[1])
    return ret
ccba3e56f006746d1e34953364d3f8a40fc70086
25,009
def serializeRegressor(tree):
    """
    Convert a sklearn.tree.DecisionTreeRegressor into a JSON-compatible format
    """
    LEAF_ATTRIBUTES = ['children_left', 'children_right', 'threshold',
                       'value', 'feature', 'impurity',
                       'weighted_n_node_samples']
    TREE_ATTRIBUTES = ['n_classes_', 'n_features_', 'n_outputs_']
    encoded = {
        'nodes': {},
        'tree': {},
        'n_leaves': len(tree.tree_.threshold),
        'params': tree.get_params()
    }
    for attr in LEAF_ATTRIBUTES:
        encoded['nodes'][attr] = getattr(tree.tree_, attr).tolist()
    for attr in TREE_ATTRIBUTES:
        encoded['tree'][attr] = getattr(tree, attr)
    return encoded
d94f8cde0144cd842175480332a398def8e19ae8
25,012
def linear_probe(h, i, m):
    """
    Finds a possible next position using linear probing

    :param h: Computed hash value that has resulted in a collision
    :param i: Offset
    :param m: Size of the table
    :return: The next index to be checked if it is open
    """
    return (h + i) % m
49ecb3ca389255b99bdde3e61bb503d3d517549b
25,013
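A usage sketch showing how linear_probe resolves a collision by trying successive offsets until a free slot turns up (not part of the dataset record; the table contents are illustrative):

table = [None] * 8
h = 3
table[3] = "occupied"
i = 0
while table[linear_probe(h, i, len(table))] is not None:
    i += 1
slot = linear_probe(h, i, len(table))
assert slot == 4  # first free slot after the collision at index 3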
def categorize(contenttype: str):
    """Return 'cbor', 'json' or 'link-format' if the content type indicates
    it is that format itself or derived from it."""
    media_type, *_ = contenttype.split(';')
    _, _, subtype = media_type.partition('/')
    if subtype == 'cbor' or subtype.endswith('+cbor'):
        return 'cbor'
    if subtype == 'json' or subtype.endswith('+json'):
        return 'json'
    if media_type == 'application/link-format':
        return 'link-format'
    return None
d065c9c3823bda424108d70a8e5b8d7dc70eab9b
25,016
def colrows_to_xy(screen_size, cursor_position):
    """Convert cursor position to x, y pixel position

    Args:
        screen_size (tuple): The screen size (width, height)
        cursor_position (tuple): The cursor position (row, col)

    Returns:
        (tuple): The screen position in pixels (x, y)
    """
    x = (8 * (cursor_position[0] - 1))
    y = (screen_size[1] - 2) - (cursor_position[1] * 10)
    return (x, y)
63d3b9f7af789c613d2067fe0ceb671e5ed04465
25,019
from typing import Optional


def irepeat(obj: object, cnt: Optional[int] = None) -> object:
    """
    Yield the object cnt times if specified or infinitely.

    Notes
    -----
    The Python itertools repeat class implementation.

    Parameters
    ----------
    obj : object
        to be repeated.
    cnt : Optional[int], optional
        the number of times counter. The default is None.

    Raises
    ------
    TypeError
        if object is not provided or cnt is not integer.

    References
    ----------
    https://docs.python.org/3/library/itertools.html#itertools.repeat

    Yields
    ------
    object
    """
    def _repeat(obj: object, cnt: Optional[int] = None) -> object:
        """Yield repeat generator."""
        if cnt is None:
            while True:
                yield obj
        else:
            for _ in range(cnt):
                yield obj

    if not (cnt is None or isinstance(cnt, int)):
        raise TypeError(f'cnt = {cnt} is not integer')
    return _repeat(obj, cnt)
7d0c3cefb763d099c75ebfd070368882004d1cf0
25,024
def max_pairwise_product(numbers):
    """
    max_pairwise_product gets the two biggest numbers and returns the
    product of them.
    TimeComplexity: O(n)
    """
    biggest = float('-inf')
    second_biggest = float('-inf')
    for ele in numbers:
        if ele > biggest:
            biggest, second_biggest = ele, biggest
        elif ele > second_biggest:
            second_biggest = ele
    return biggest * second_biggest
470a1400fc235de7c4a6eb459577f674717b6ced
25,025
from typing import List
import glob


def filter_filetype(filetype: str) -> List:
    """
    Filter files matching the given file type pattern.

    Args:
        filetype: File type pattern to filter by (e.g. *.png).

    Returns:
        List of matches for the filtered type.
    """
    return glob.glob(filetype)
45ea03ac4a148d2817df1c5fdea2969e395dfcaa
25,029
import json


def deserializeValue(v):
    """
    Deserialize single value from JSON string format
    """
    try:
        return json.loads(v)
    except ValueError:
        raise ValueError("No JSON object could be decoded from \"%s\"" % v)
5bc26f42f7873030d9bf79a502c2604433df150f
25,030
def hydrobasins_upstream_ids(fid, df):
    """Return the hydrobasins features located upstream.

    Parameters
    ----------
    fid : feature id
        HYBAS_ID of the downstream feature.
    df : pd.DataFrame
        Watershed attributes.

    Returns
    -------
    pd.DataFrame
        Attributes of the basins including `fid` and its upstream contributors.
    """
    def upstream_ids(bdf, bid):
        return bdf[bdf['NEXT_DOWN'] == bid]['HYBAS_ID']

    # Locate the downstream feature
    ds = df.set_index("HYBAS_ID").loc[fid]
    # Do a first selection on the main basin ID of the downstream feature.
    sub = df[df['MAIN_BAS'] == ds['MAIN_BAS']]
    # Find upstream basins
    up = [fid, ]
    for b in up:
        tmp = upstream_ids(sub, b)
        if len(tmp):
            up.extend(tmp)
    return sub[sub['HYBAS_ID'].isin(up)]
3fed2e9f5bad0d58bd21175750b6e733c930b952
25,032
import functools


def with_header(header=None):
    """Decorator that adds a section header if there's any output

    The decorated function should yield output lines; if there are any,
    the header gets added.
    """
    def wrap(func):
        @functools.wraps(func)
        def wrapped(cls, remaining_attrs):
            result = list(func(cls, remaining_attrs))
            if result:
                # Sphinx/ReST doesn't allow "-----" just anywhere :(
                yield u''
                yield u'.. raw:: html'
                yield u''
                yield u'    <hr>'
                yield u''
                if header:
                    yield header + u':'
                    yield u''
                for row in result:
                    yield row
        return wrapped
    return wrap
653e6a710acdca1b6ac28a6ed5ffb3ac60c84bc0
25,034
from typing import Union
from pathlib import Path


def is_relative_to(absolute_path: Union[str, Path], *other) -> bool:
    """Return True if the given path is relative to the other path(s).

    Note: Adapted from Python 3.9
    """
    try:
        Path(absolute_path).relative_to(*other)
        return True
    except ValueError:
        return False
72076c54a024de3aa83d5355aaa1e04838f53f1e
25,046
import yaml


def get_config(config_file):
    """
    Read CircleCI config YAMLs as dictionaries
    """
    with open(config_file) as fstream:
        try:
            return yaml.safe_load(fstream)
        except yaml.YAMLError as err:
            print(err)
13005c11aab352ff96ef422d724ef91ec96074cb
25,048
from typing import Any


def is_iterable(obj: Any) -> bool:
    """
    Return whether an object is iterable or not.

    :param obj: Any object for check
    """
    try:
        iter(obj)
    except TypeError:
        return False
    return True
4053ede1d7ac825ec0e9723892df9e5fe638e8b6
25,049
def get_x_y(df):
    """
    Split the dataframe into the features and the target

    :param df: The data frame
    :return: X, y - The features and the target respectively
    """
    X = df.drop('isFraud', axis=1)
    y = df.isFraud
    return X, y
902aba32c2ed36b31ac2e59a804a0ad009fb32d2
25,051
def get_child_schema(self):
    """An optional function which returns the list of child keys that are
    associated with the parent key `docs` defined in `self.schema`.

    This API returns an array of JSON objects, with the possible fields shown
    in the example. Hence the return type is a list of lists, because this
    plugin returns a list of objects, each with this possible set of keys.

    Returns:
        [['year', 'title', 'description', 'mediatype', 'publicdate',
          'downloads', 'week', 'month', 'identifier', 'format', 'collection',
          'creator', 'score']]

    Example of one of the child objects in the array associated with `docs`:
        {
            year: 1998,
            title: "AAPL CONTROL ROOM AERO ACOUSTIC PROPULSION LABORATORY AND CONTROL ROOM PERSONNEL",
            description: "AAPL CONTROL ROOM AERO ACOUSTIC PROPULSION LABORATORY AND CONTROL ROOM PERSONNEL",
            mediatype: "image",
            publicdate: "2009-09-17T17:14:53Z",
            downloads: 5,
            week: 0,
            month: 0,
            identifier: "GRC-C-1998-853",
            format: ["JPEG", "JPEG Thumb", "Metadata"],
            collection: ["nasa", "glennresearchcentercollection"],
            creator: ["NASA/Glenn Research Center"],
            score: 2.617863
        }
    """
    return [['year', 'title', 'description', 'mediatype', 'publicdate',
             'downloads', 'week', 'month', 'identifier', 'format',
             'collection', 'creator', 'score']]
4d91ff18ab8e3f6cec5610169dc6d52e7a647960
25,052
from typing import Tuple


def calculate_broadcasted_elementwise_result_shape(
    first: Tuple[int, ...],
    second: Tuple[int, ...],
) -> Tuple[int, ...]:
    """Determine the return shape of a broadcasted elementwise operation."""
    return tuple(max(a, b) for a, b in zip(first, second))
5d565b2b5f38c84ab1f1573f4200fc65d6ae8e6a
25,055
from typing import List
from typing import Dict
from typing import Any
from typing import Tuple
from typing import Optional


def prepare_outputs_for_categories(records: List[Dict[str, Any]]) -> \
        Tuple[List[Dict[str, Optional[Any]]], List[Dict[str, Optional[Any]]]]:
    """
    Prepares human-readable and context outputs for the
    'bmc-remedy-category-details-get' command.

    :param records: List containing records of categories from the REST API.
    :return: Tuple containing the human-readable and context outputs.
    """
    outputs = list()
    hr_output = list()
    for each_record in records:
        temp = dict()
        temp1 = dict()
        temp["Id"] = temp1["Id"] = each_record.get("Id")
        temp["Name"] = temp1["Name"] = each_record.get("Name")
        temp["Children Count"] = temp1["ChildrenCount"] = each_record.get("BMCServiceDesk__children__c")
        hr_output.append(temp)
        outputs.append(temp1)
    return hr_output, outputs
8215214d79f46666aec167ed2b99bf73663692eb
25,057
import math


def divide_into_subsets(list_of_element, subset_size):
    """
    Given a list of elements, divide into subsets.
    e.g. divide_into_subsets([1,2,3,4,5], subset_size=2) == [[1, 2], [3, 4], [5]]

    :param list_of_element:
    :param subset_size:
    :return:
    """
    element_gen = (el for el in list_of_element)
    return [[nextel for _, nextel in zip(range(subset_size), element_gen)]
            for _ in range(int(math.ceil(float(len(list_of_element)) / subset_size)))]
7014135efd2866e42be78def18c578eaef0e9a4e
25,063
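A usage sketch exercising divide_into_subsets, including the docstring's own example and an input shorter than the subset size (not part of the dataset record):

assert divide_into_subsets([1, 2, 3, 4, 5], subset_size=2) == [[1, 2], [3, 4], [5]]
assert divide_into_subsets([1, 2], subset_size=3) == [[1, 2]]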
def parse_str(s):
    """Try to parse a string to int, then float, then bool, then leave alone"""
    try:
        return int(s)
    except ValueError:
        try:
            return float(s)
        except ValueError:
            if s.lower() == 'true':
                return True
            if s.lower() == 'false':
                return False
            return s
251cd0bdbf47464a6a424e012ef22cbf0f38452f
25,066
def generate_lambda_key(domain):
    """Generate the S3 key name for the lambda's zip file.

    Args:
        domain (str): Use the domain as part of the key.

    Returns:
        (str)
    """
    key = 'dynamodb_autoscale.' + domain + '.zip'
    return key
d8dcd6897c2773a1ba31df72c2e8336badc42e68
25,067
def remove_brace(value):
    """Remove braces (which indicate capital letters in latex).

    Args:
        value (str): string which may contain ``{``, ``}``.

    Returns:
        str (``{``, ``}`` removed.)

    Examples:
        >>> val = "The {CNN}-based ..."
        >>> remove_brace(val)
        'The CNN-based ...'
    """
    value = str(value).replace("{", "").replace("}", "")
    return value
b4144fde9890128572a1f88caa26f6515f13b924
25,070
def generate_list_articles(bib):
    """Description of generate_list_articles

    From the bib file generates a ReadMe-styled table like:
    | [Name of the article](Link to the .pdf) | Code's link if available |
    """
    articles = ""
    for entry in bib:
        if "title" in entry:
            if "link" in entry:
                articles += "| [" + entry["title"] + "](" + entry["link"] + ") | "
            else:
                articles += "| " + entry["title"] + " | "
        if "code" in entry:
            if "No" in entry["code"]:
                articles += "No "
            else:
                if "github" in entry["code"]:
                    articles += "[GitHub"
                else:
                    articles += "[Website"
                articles += "](" + entry["code"] + ") "
        else:
            articles += "No "
        articles += "|\n"
    return articles
362439030116376687f6d00bce5963a6a6587309
25,073
from typing import Union

import torch


def cam_init2orig(cam, scale: Union[float, torch.Tensor], start_pt: torch.Tensor, N=224):
    """
    Args:
        cam (bs, 3): (s, tx, ty)
        scale (bs,): scale = resize_h / orig_h
        start_pt (bs, 2): (lt_x, lt_y)
        N (int): hmr_image_size (224) or IMG_SIZE

    Returns:
        cam_orig (bs, 3): (s, tx, ty), camera in original image coordinates.
    """
    # This is camera in crop image coord.
    cam_crop = torch.cat(
        [N * cam[:, 0:1] * 0.5,
         cam[:, 1:] + (2. / cam[:, 0:1]) * 0.5],
        dim=1
    )
    # This is camera in orig image coord
    cam_orig = torch.cat(
        [cam_crop[:, 0:1] / scale,
         cam_crop[:, 1:] + (start_pt - N) / cam_crop[:, 0:1]],
        dim=1
    )
    return cam_orig
99edd5049b28cb09c479e9e1bef9c4c820bd2e12
25,074
def find_prefixed_labels(labels, prefix):
    """Util for filtering and cleaning labels that start with a given prefix.

    Given a list of labels, find only the specific labels with the given prefix.

    Args:
        prefix: String expected to be prefix of relevant labels
        labels: List of string labels

    Return:
        Filtered labels (i.e. all labels starting with prefix)
    """
    changelog_labels = []
    for label in labels:
        label = label.strip()
        if label.startswith(prefix) and len(label) > len(prefix):
            changelog_labels.append(label)
    return changelog_labels
49631b8cf257bad0e2c3ec1be4e2c48c5e49e963
25,075
import re


def resolve_value(value):
    """
    Convert "1k" to 1 000, "1m" to 1 000 000, etc.
    """
    if value is None:
        return None
    # Suffix multipliers: 1k = 1e3, 1m = 1e6, 1b = 1e9, 1t = 1e12
    tens = dict(k=1e3, m=1e6, b=1e9, t=1e12)
    value = value.replace(',', '')
    match = re.match(r'(-?\d+\.?\d*)([kmbt]?)$', value, re.I)
    if not match:
        return None
    factor, exp = match.groups()
    if not exp:
        return float(factor)
    return int(float(factor) * tens[exp.lower()])
ef3880c532f143b7bc3493d6cc942529329e040a
25,082
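A usage sketch for resolve_value with the corrected suffix multipliers (not part of the dataset record; values are illustrative):

assert resolve_value("1.5m") == 1_500_000
assert resolve_value("2k") == 2000
assert resolve_value("1,234") == 1234.0   # commas stripped, no suffix
assert resolve_value("abc") is None       # non-numeric specs return None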
def pkcs7_pad(inp, block_size):
    """
    Using the PKCS#7 padding scheme, pad <inp> to be a multiple of
    <block_size> bytes. Ruby's AES encryption pads with this scheme, but
    pycrypto doesn't support it.

    Implementation copied from pyaspora:
    https://github.com/mjnovice/pyaspora/blob/master/pyaspora/diaspora/protocol.py#L209
    """
    val = block_size - len(inp) % block_size
    if val == 0:
        return inp + (bytes([block_size]) * block_size)
    else:
        return inp + (bytes([val]) * val)
5a5aae6f588e5e67dc30c85ab6a6afcdb9c728c0
25,083
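A usage sketch for pkcs7_pad (not part of the dataset record): the pad byte value equals the pad length, and a full extra block is added when the input is already aligned:

assert pkcs7_pad(b"YELLOW SUBMARINE", 20) == b"YELLOW SUBMARINE\x04\x04\x04\x04"
assert pkcs7_pad(b"16-byte-aligned!", 16) == b"16-byte-aligned!" + bytes([16]) * 16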
import torch


def dr_transformation_cate(
    y: torch.Tensor,
    w: torch.Tensor,
    p: torch.Tensor,
    mu_0: torch.Tensor,
    mu_1: torch.Tensor,
) -> torch.Tensor:
    """
    Transforms data to efficient influence function/AIPW pseudo-outcome for
    CATE estimation

    Parameters
    ----------
    y : array-like of shape (n_samples,)
        The observed outcome variable
    w : array-like of shape (n_samples,)
        The observed treatment indicator
    p : array-like of shape (n_samples,)
        The treatment propensity, estimated or known. Can be None, then p=0.5
        is assumed
    mu_0 : array-like of shape (n_samples,)
        Estimated or known potential outcome mean of the control group
    mu_1 : array-like of shape (n_samples,)
        Estimated or known potential outcome mean of the treatment group

    Returns
    -------
    d_hat:
        EIF transformation for CATE
    """
    if p is None:
        # assume equal propensity
        p = torch.full(y.shape, 0.5)

    EPS = 1e-7
    w_1 = w / (p + EPS)
    w_0 = (1 - w) / (EPS + 1 - p)
    return (w_1 - w_0) * y + ((1 - w_1) * mu_1 - (1 - w_0) * mu_0)
7bcff5f42f5aa664fd0250dd9faba49889621205
25,087
def create_adjusted_coefficients(targets):
    """
    Create a dictionary of "adjusted-coefficient" elements (as XML text) for
    the given targets, where the key is the year and the value is the text
    for all elements starting at that year.
    """
    template = '<adjusted-coefficient year="{year}">{coefficient}</adjusted-coefficient>\n'
    # sort a copy of the targets by year, descending
    targets = sorted(targets, key=lambda tup: tup[0], reverse=True)

    xml_dict = {}
    xml = ''
    for year, coefficient in targets:
        xml = template.format(year=year, coefficient=coefficient) + xml
        xml_dict[year] = xml
    return xml_dict
89f6ba9d6a1a1977ac4a175b28f8db652ba9ae37
25,093
def link_to_url(link):
    """
    >>> from scrapy.link import Link
    >>> link_to_url(Link("http://example.com/?foo=bar"))
    'http://example.com/?foo=bar'
    >>> link_to_url(Link("http://example.com/?foo=bar", fragment="id1"))
    'http://example.com/?foo=bar#id1'
    >>> link_to_url(Link("http://example.com/?foo=bar", fragment="!start"))
    'http://example.com/?foo=bar#!start'
    """
    if link.fragment and link.fragment != '#':
        return "#".join([link.url, link.fragment])
    return link.url
6a6bf4a1f33748175ac7a95899c1fcffcf9751b0
25,095
def zeroPrepender(source, length):
    """
    Prepend extra zeros to a source number based on the specified length
    """
    if (not source and source != 0) or not length:
        return None
    result = str(source)
    if len(result) >= length:
        return result
    for i in range(length - len(result)):
        result = '0' + result
    return result
ce9fc94e4b745f5782af818dcd7c66789857a342
25,105
def bounds_elementwise(lst):
    """Given a non-empty list, returns (mins, maxes) each of which is the
    same length as the list items.

    >>> bounds_elementwise([[0,6,0], [5,0,7]])
    ([0, 0, 0], [5, 6, 7])
    """
    indices = list(range(len(lst[0])))
    mins = [min(el[i] for el in lst) for i in indices]
    maxes = [max(el[i] for el in lst) for i in indices]
    return (mins, maxes)
5fa4fbe75db310d971005c88fc6d04058d3cd998
25,108
from typing import Counter


def get_probs(occurrences):
    """
    Computes conditional probabilities based on frequency of co-occurrences

    Parameters
    ----------
    occurrences: occurrences[x][y] number of times with (X=x and Y=y)

    Returns
    -------
    probs : probs[x][y] = Pr(Y=y | X=x)
    reverse_probs : reverse_probs[y][x] = Pr(X=x | Y=y)
    """
    probs = {}
    reverse_probs = {}
    y_occ = Counter()
    for x, ys in occurrences.items():
        total = sum(ys.values())
        probs[x] = {}
        for y, occ in ys.items():
            probs[x][y] = occ / total
            y_occ[y] += occ
    for x, ys in occurrences.items():
        for y, occ in ys.items():
            reverse_probs.setdefault(y, {})[x] = occ / y_occ[y]
    return probs, reverse_probs
2e4dc69646a1800496bd5366bde1be78c3d18061
25,115
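A usage sketch for get_probs (not part of the dataset record; the co-occurrence counts are illustrative):

occurrences = {"sun": {"hot": 3, "cold": 1}, "rain": {"cold": 2}}
probs, reverse_probs = get_probs(occurrences)
assert probs["sun"]["hot"] == 0.75             # Pr(Y=hot | X=sun) = 3/4
assert reverse_probs["cold"]["rain"] == 2 / 3  # Pr(X=rain | Y=cold) = 2/3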
def get_countN(x, n):
    """Count the number of nucleotide n in the string."""
    return x.upper().count(n.upper())
7e5577dcf2a5666f77a915dd943cd5f59e2bd260
25,124
import hashlib


def fileSHA(filepath):
    """ Compute SHA (Secure Hash Algorithm) of a file.

    Input  : filepath : full path and name of file (eg. 'c:\\windows\\emm386.exe')
    Output : string : contains the hexadecimal representation of the SHA of the file.
             returns '0' if file could not be read (file not found, no read rights...)
    """
    try:
        with open(filepath, 'rb') as file:
            digest = hashlib.sha256()
            data = file.read(65536)
            while len(data) != 0:
                digest.update(data)
                data = file.read(65536)
    except OSError:
        return '0'
    return digest.hexdigest()
98bddf8ef32c769b77dde704838c188e2b02ad49
25,128
import pickle


def load_pickle(path):
    """
    Load a WordGraph object from a pickle file.

    :param path: path to pickled WordGraph object
    :return: the unpickled WordGraph object
    """
    with open(path, 'rb') as infile:
        return pickle.load(infile)
3108097ee85f9947606c6b74bfe0d5ba12cea517
25,129
import yaml


def yaml_config_file_provider(handle, cmd_name):  # pylint: disable=unused-argument
    """Read yaml config file from file handle."""
    return yaml.safe_load(handle)
e95295a0413290957d7b319b73876209ba11b5c6
25,130
def Command(*_args, **_kw):
    """Fake Command"""
    return ["fake"]
a3d435534a045fe1b08eaffc7327065492b07026
25,132
def argmin(l) -> int:
    """
    From a previous AoC.

    >>> argmin([9,9,9,1,9])
    3
    """
    mini = 0
    for i, e in enumerate(l):
        mini = i if e < l[mini] else mini
    return mini
4b8228dd94b57fc03a01865f2e67a5f17e997c5a
25,134
from typing import List


def relationship_headers() -> List[str]:
    """
    Produce headers for a relationship file.
    """
    return ["from", "to", "relationship"]
7fdaa7d14f0fd7b8a2a01fd808d69aac6d41d9ca
25,135
def signe(a):
    """Return 0 if a < 0, 1 otherwise."""
    if a < 0:
        return 0
    else:
        return 1
71d0891f5dfb1d6d4ea3b8a9ad802e976de6e742
25,140
def _bool_value(element):
    """
    Given an xml element, returns the tag text converted to a bool.

    :param element: The element to fetch the value from.
    :return: A boolean.
    """
    return (element.text.lower() == "true")
d5eb1c94ec6a09b6a81508124f36bacf8c253fb8
25,143
def get_hr_val(choices, db_val):
    """
    Get the human readable value for the DB value from a choice tuple.

    Args:
        choices (tuple): The choice tuple given in the model.
        db_val: The respective DB value.

    Returns:
        The matching human readable value.
    """
    for pair in choices:
        if pair[0] == db_val:
            return pair[1]
    # Value not found.
    return None
26791ccc16e6b1e9399bc6d10b4292c7c7780ebd
25,145
def resolve_templating_engine(args):
    """
    Figures out what templating engine should be used to render the stack
    """
    # Figure out what templating engine to use.
    # Only use -t option when stack comes from stdin
    if args.stack.name == "<stdin>":
        return args.templating_engine
    elif ".mako" in args.stack.name[-5:]:
        return "mako"
    elif ".jinja" in args.stack.name[-6:]:
        return "jinja"
    elif ".yaml" in args.stack.name[-5:]:
        return "yaml"
    raise NotImplementedError("Templating engine not supported. Must be set "
                              "to 'mako', 'jinja', or '' in the command line "
                              "or by using the equivalent file extension")
aef956cd3a5a9cca8451f069a986407af631694e
25,147
def parse_value(value, base_value=0):
    """Parse a numeric value spec which is one of:

    NNN       integer
    NN.MMM    float
    NN%       proportional to base_value
    """
    if not isinstance(value, str):
        return value
    if value[-1] == '%':
        value = base_value * float(value[:-1]) / 100
    elif '.' in value:
        return float(value)
    return int(value)
186ef21e453af617449294bc44d97765b28e6676
25,148
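A usage sketch for parse_value (not part of the dataset record); base_value only matters for the "%" form, which truncates to an integer:

assert parse_value(10) == 10                     # non-strings pass through
assert parse_value("3.5") == 3.5
assert parse_value("50%", base_value=200) == 100
assert parse_value("42") == 42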
def _html_tidy_cell_item(item, key):
    """
    Returns HTML-tidied item for putting in table cell.

    :param item: item to be tidied
    :param key: dictionary key (for reference)
    :return: tidied HTML item or string
    """
    if isinstance(item, dict):
        resp = "<br/>\n".join(["{}: {}".format(key, value)
                               for key, value in item.items()])
        resp += "<br/>{}: SUCCESS!".format(int(key) + 1)
        return resp
    return item
748ad64ca025a76472d2146cd9e369f525470bee
25,152
def get_entrypoint(request_path):
    """
    Get the entrypoint url from a request path, splitting off sub-indexes
    and query strings
    """
    entrypoint = request_path.replace('/index', '').split('?')[0]
    if entrypoint == '':
        entrypoint = '/index'
    return entrypoint
52fec0fd6933e26bc38e26a52b3124d1a5914258
25,160
from typing import Dict
from pathlib import Path


def create_paths(run_id: str, run_dir: str = "runs/") -> Dict[str, Path]:
    """
    Create the necessary directories and sub-directories conditioned on the
    `run_id` and run directory.

    :param run_id: Unique Run Identifier.
    :param run_dir: Path to run directory to save model checkpoints and run metrics.
    """
    paths = {
        # Top-Level Directory for a Given Run
        "runs": Path(run_dir, run_id)
    }

    # Programmatically create paths for each directory
    for p in paths:
        paths[p].mkdir(parents=True, exist_ok=True)

    return paths
2448d2621647b084161312bf572a2294705ea713
25,162
def getDimensions(self):
    """Gets the number of rows and columns of the map"""
    return (self.__numrows__, self.__numcols__)
fa56d7ec97ba237fb41cc70bbbb7a2348a621f04
25,165
import re


def convert_fa_spaces(input_value: str) -> str:
    """
    Convert space between Persian MI and De-Yii to zero-width non-joiner
    (halfspace) char.

    :param input_value: String contains persian chars
    :return: New string with converted space to half space char
    """
    # u200C is the code for unicode zwnj character
    # https://en.wikipedia.org/wiki/Zero-width_non-joiner
    repl = '\\2\u200C\\4'
    # replace space between persian MI.
    mi_pattern = r'((\s\u0645\u06CC)+([\s])+([\u0600-\u06EF]{1,}){1,})'
    result = re.sub(mi_pattern, repl, input_value, 0)
    # replace space between persian De-Yii.
    de_yii_pattern = r'(([\u0600-\u06EF]{1,})+([\s])+(ای|ایی|اند|ایم|اید|ام){1})'
    result = re.sub(de_yii_pattern, repl, result)
    return result
f295402cdb086b62c12abbf935bb12b24c223ca0
25,167
from multiprocessing import RLock as rlockp
from threading import RLock as rlockt


def create_rlock(process=False):
    """Creates a reentrant lock object."""
    if process:
        return rlockp()
    else:
        return rlockt()
eb4cbdd72c5f649e3907cda0054c96913b722381
25,168
def to_hex_string(string: str) -> str:
    """Converts UTF-8 string into its hex representation

    :param string: str
        The string to convert to hex
    :return: Hex representation of the given string
    """
    return string.encode('utf-8').hex()
62b9b71af31bccdde136aa6d2dabbb2ee3df2ea7
25,169
import random


def sample(p):
    """Given an array of probabilities, which sum to 1.0, randomly choose a
    'bin', e.g. if p = [0.25, 0.75], sample returns 1 75% of the time, and 0
    25%; NOTE: p in this program represents a row in the pssm, so its length
    is 4."""
    r = random.random()
    i = 0
    while r > p[i]:
        r -= p[i]
        i += 1
    return i
886da94e2c9e35bd07ceba606de92d2126197b99
25,173
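A usage sketch for sample (not part of the dataset record): with a fixed seed the draws are reproducible, and the empirical bin frequencies approach p:

import random

random.seed(0)
draws = [sample([0.25, 0.75]) for _ in range(10000)]
# bin 1 should come up roughly 75% of the time
assert abs(draws.count(1) / 10000 - 0.75) < 0.02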
import random


def cartesian_choice(*iterables):
    """
    A list with random choices from each iterable of iterables is being
    created in respective order. The result list can be seen as an element
    of the Cartesian product of the iterables.
    """
    res = []
    for population in iterables:
        res.append(random.choice(population))
    return res
ad9ff73909b17b65d98e61c36ceef7b9ace1a1f3
25,177
def add(number_one, number_two):
    """
    Add two numbers.

    :param number_one: the first number
    :param number_two: the second number
    :return: the sum of the two numbers
    """
    result = number_one + number_two
    return result
ac85b372ebf48c4a6b4dc67b61d74dfa6d9b4246
25,178