Dataset columns: content (string, length 35 to 416k), sha1 (string, length 40), id (int64, 0 to 710k)
def rol(value, count):
    """A rotate-left instruction in Python"""
    for y in range(count):
        value *= 2
        if (value > 0xFFFFFFFFFFFFFFFF):
            value -= 0x10000000000000000
            value += 1
    return value
ac6f4efee5d806201a04f83921bca47ac6e42ee8
702,991
import time import decimal def collatz(num): """ :type num: int; :param num: start number, any positive integer; :return [start number, [following_numbers], steps, time]; """ num_original = num following_nums = [] step = 0 start = time.time() while num != 1: # print(str(num)) if (num % 2) == 0: # Even num = int(num / 2) # n/2 following_nums.append(num) step += 1 else: # Odd num = int(3 * num + 1) # 3n+1 following_nums.append(num) step += 1 end = time.time() elapsed = decimal.getcontext().create_decimal(decimal.Decimal(end - start)) return [num_original, following_nums, step, round(elapsed, 10)]
82427afa292d9f7581a0964482d08e454b9012e5
702,992
def bubble_sort(vals):
    """Sort the given array using bubble sort."""
    for i in range(len(vals) - 1):
        for j in range(len(vals) - i - 1):
            if vals[j] > vals[j + 1]:
                vals[j], vals[j + 1] = vals[j + 1], vals[j]
    return vals
ee0286dd53da0fbfc508fa1c26dbf913ba8f92ce
702,993
def public_byte_prefix(is_test):
    """Address prefix. Returns b'\0' for main network and b'\x6f' for testnet"""
    return b'\x6f' if is_test else b'\0'
3ecc4fe0cbbd8dee1a91f90e0112d9be3184fc73
702,994
import json


def _json_read(filename):
    """Read a json into a dict."""
    with open(filename) as file:
        return json.load(file)
84187d2a2281d2725adb8dae903253bdcd41e2b9
702,996
def addLists(list1, list2):
    """Add lists together by value. i.e. addLists([1,1], [2,2]) == [3,3]."""
    # Find big list and small list
    blist, slist = list(list2), list(list1)
    if len(list1) > len(list2):
        blist, slist = slist, blist
    # Overlay small list onto big list
    for i, b in enumerate(slist):
        blist[i] += b
    return blist
f5469dab8fd2c62d2d3ffed253803c1a3d343281
702,997
import tarfile def files_from_archive(tar_archive: tarfile.TarFile): """ Extracts only the actual files from the given tarfile :param tar_archive: the tar archive from which to extract the files :return: List of file object extracted from the tar archive """ file_members = [] # Find the actual files in the archive for member in tar_archive.getmembers(): if member.isreg(): # extract the actual files from the archive file_members.append(member) files = [] file_names = [] for file_member in file_members: files.append(tar_archive.extractfile(file_member)) # Extract the file names without the top level directory from the file members file_names.append("/".join(file_member.name.split("/")[1:])) return files, file_names
09ead0b2b955afdc5bf96a8e0a8717989c155406
702,998
def help(term, arabic_column, english_column):
    """ show all details of word"""
    exclude_keys = [arabic_column, english_column]
    details = {k: term[k] for k in set(list(term.keys())) - set(exclude_keys)}
    return details
c1118fd1240802a4ba1f7976d21806eb7302276c
702,999
def nodify(n):
    """
    Modifies string to contain node#mod_
    :param n: string
    :return: string
    """
    return 'node#mod_{}'.format(n)
b0d09ded891e369d463f44404501d82e5f266941
703,000
def calculate_rescue_time_pulse(very_distracting, distracting, neutral, productive, very_productive): """ Per RescueTime API :param very_distracting: integer - number of seconds spent :param distracting: integer - number of seconds spent :param neutral: integer - number of seconds spent :param productive: integer - number of seconds spent :param very_productive: integer - number of seconds spent Per documentation, this is how the productive score is calculated. http://help.rescuetime.com/kb/rescuetime-website/how-is-my-productivity-pulse-calculated """ # which is zero, lol very_distracting_score = very_distracting * 0 distracting_score = distracting * 1 neutral_score = neutral * 2 productive_score = productive * 3 very_productive_score = very_productive * 4 total_score = very_distracting_score + distracting_score + neutral_score + productive_score + very_productive_score total_time_spent = very_distracting + distracting + neutral + productive + very_productive # final multiplier to even things out total_time_spent_scaled = total_time_spent * 4 try: rt_score = total_score / total_time_spent_scaled except ZeroDivisionError: rt_score = 0 return rt_score
28144b5e1c7820b12971a0f5a88d56971c798dc7
703,001
def split_nth(string, count):
    """ Splits string into chunks of `count` characters; the last chunk may be shorter """
    return [string[i:i+count] for i in range(0, len(string), count)]
38833ef711ce04f5563b343f477e7792035ec669
703,003
def deduplicate(elements): """Remove duplicate entries in a list of dataset annotations. Parameters ---------- elements: list(vizier.datastore.annotation.base.DatasetAnnotation) List of dataset annotations Returns ------- list(vizier.datastore.annotation.base.DatasetAnnotation) """ if len(elements) < 2: return elements s = sorted(elements, key=lambda a: (a.identifier, a.obj_type, a.key, a.value)) result = s[:1] for a in s[1:]: elem_l = result[-1] if a.identifier != elem_l.identifier or a.obj_type != elem_l.obj_type or a.key != elem_l.key or a.value != elem_l.value: result.append(a) return result
99de9667b46ab9da619d28748d3d74bbec54892a
703,004
def get_cut_rect_x(rect, axis):
    """ cuts one rect about an x axis """
    rects = [rect]
    llx, lly, urx, ury = rect
    if llx < axis and urx > axis:
        rects = [(llx, lly, axis, ury), (axis, lly, urx, ury)]
    return rects
98ec23d892b974238d0a064c5e9de044598bb788
703,005
import requests


def check_all_links(links):
    """ Check that the provided links are valid.

    Links are considered valid if a HEAD request to the server returns a 200 status code.
    """
    broken_links = []
    for link in links:
        head = requests.head(link)
        if head.status_code != 200:
            broken_links.append(link)
    return broken_links
b6e784da72b4f81af3e393804ef3f776c2f3fc85
703,006
def compute_seen_words(inscription_list):
    """Computes the set of all words seen in phrases in the game"""
    return {word
            for inscription in inscription_list
            for phrase in inscription['phrases']
            for word in phrase.split()}
496fc64ee37a6a6b0b3df0c3e9230d7b7ef46d0f
703,008
import argparse def get_args(batch_size=8, image_size=256, max_iter=100000): """ Get command line arguments. Arguments set the default values of command line arguments. """ description = "Example of Lightweight GAN." parser = argparse.ArgumentParser(description) parser.add_argument("-d", "--device-id", type=str, default="0", help="Device id.") parser.add_argument("-c", "--context", type=str, default="cudnn", help="Context.") parser.add_argument("--type-config", "-t", type=str, default='float', help='Type of computation. e.g. "float", "half".') parser.add_argument("--img-path", type=str, default="~/AnimalFace-dog", help="Image path.") parser.add_argument("--image-size", type=int, default=image_size, help="Image size.") parser.add_argument("--batch-size", "-b", type=int, default=batch_size, help="Batch size.") parser.add_argument("--max-iter", "-i", type=int, default=max_iter, help="Max iterations.") parser.add_argument("--save-interval", type=int, default=50000, help="Interval for saving models.") parser.add_argument("--test-interval", type=int, default=5000, help="Interval for testing models.") parser.add_argument("--latent", type=int, default=256, help="Number of latent variables.") parser.add_argument("--monitor-path", type=str, default="./result/tmp", help="Monitor path.") parser.add_argument("--model-load-path", type=str, default=".", help="Path to load parameters from") parser.add_argument("--train-samples", type=int, default=-1, help="Number of data to be used. When -1 is set all data is used.") parser.add_argument("--lr", type=float, default=2e-4, help="Learning rate") parser.add_argument("--aug-list", nargs="+", default=["lrflip", "translation", "color"]) args = parser.parse_args() return args
b9f5a7acc6e95eb112cb33827f65b7acc02d08eb
703,009
import argparse def parse_options(): """Parses command line options and returns an option dictionary.""" options = {} parser = argparse.ArgumentParser( description='Recursively apply Review Board reviews' ' and GitHub pull requests.') parser.add_argument('-d', '--dry-run', action='store_true', help='Perform a dry run.') parser.add_argument('-n', '--no-amend', action='store_true', help='Do not amend commit message.') parser.add_argument('-c', '--chain', action='store_true', help='Recursively apply parent review chain.') parser.add_argument('-s', '--skip-hooks', action='store_true', help='Skip the commit hooks (e.g., Mesos style check).') parser.add_argument('-3', '--3way', dest='three_way', action='store_true', help='Use 3 way merge in git apply.') # Add -g and -r and make them mutually exclusive. group = parser.add_mutually_exclusive_group(required=True) group.add_argument('-g', '--github', metavar='PULL_REQUEST', help='Pull request number') group.add_argument('-r', '--review-id', metavar='REVIEW_ID', help='Numeric review ID') args = parser.parse_args() options['review_id'] = args.review_id options['dry_run'] = args.dry_run options['no_amend'] = args.no_amend options['github'] = args.github options['chain'] = args.chain options['skip_hooks'] = args.skip_hooks options['3way'] = args.three_way return options
6d35277fe508ef43e4631904f5b07d2cc5b947fd
703,011
from typing import List def _get_create_repo(request) -> List[str]: """ Retrieves the list of all GIT repositories to be created. Args: request: The pytest requests object from which to retrieve the marks. Returns: The list of GIT repositories to be created. """ names = request.config.getoption("--create-repo", []) # names.extend(request.node.get_closest_marker("create_repo", [])) # * Split ',' separated lists # * Remove duplicates - see conftest.py::pytest_collection_modifyitems() names = [name for i in names for name in i.split(",")] return list(set(names))
4ac8cefefb75af3bb86fcc16f5c8b79953b136bf
703,012
def update_outputs(region, resource_type, name, outputs):
    """ update outputs with appropriate results """
    element = {
        "op": "remove",
        "path": "/%s/%s" % (resource_type, name)
    }
    outputs[region].append(element)
    return outputs
97858e5d183af9974bd31be180dfe05c26048ab3
703,013
def _check_electrification_scenarios_for_download(es): """Checks the electrification scenarios input to :py:func:`download_demand_data` and :py:func:`download_flexibility_data`. :param set/list es: The input electrification scenarios that will be checked. Can be any of: *'Reference'*, *'Medium'*, *'High'*, or *'All'*. :return: (*set*) -- The formatted set of electrification scenarios. :raises TypeError: if es is not input as a set or list, or if the components of es are not input as str. :raises ValueError: if the components of es are not valid. """ # Check that the input is of an appropriate type if not isinstance(es, (set, list)): raise TypeError("Electrification scenarios must be input as a set or list.") # Check that the components of es are str if not all(isinstance(x, str) for x in es): raise TypeError("Individual electrification scenarios must be input as a str.") # Reformat components of es es = {x.capitalize() for x in es} if "All" in es: es = {"Reference", "Medium", "High"} # Check that the components of es are valid if not es.issubset({"Reference", "Medium", "High"}): invalid_es = es - {"Reference", "Medium", "High"} raise ValueError(f'Invalid electrification scenarios: {", ".join(invalid_es)}') # Return the reformatted es return es
ccd1ec8f0b1349267ba1334f7744056bc43e32ec
703,014
def is_shuffle(s1, s2, s3):
    """ Greedy check that s3 is an interleaving of s1 and s2. Runtime: O(n) """
    if len(s3) != len(s1) + len(s2):
        return False
    i1 = i2 = i3 = 0
    while i1 < len(s1) and i2 < len(s2):
        c = s3[i3]
        if s1[i1] == c:
            i1 += 1
        elif s2[i2] == c:
            i2 += 1
        else:
            return False
        i3 += 1
    # Once one string is exhausted, the rest of s3 must match the remainder of the other.
    return s3[i3:] == s1[i1:] + s2[i2:]
3b88d117efde1d6b8ea8e0266a9c3ac7ae039458
703,015
def header_is_sorted_by_coordinate(header):
    """Return True if bam header indicates that this file is sorted by coordinate. """
    return 'HD' in header and 'SO' in header['HD'] and header['HD']['SO'].lower() == 'coordinate'
b656770806818abe742be32bc14c31a8a8e3e535
703,016
def calc_node_size(self): """ calculate minimum node size. """ title_width = self._text_item.boundingRect().width() port_names_width = 0.0 port_height = 0.0 if self._input_items: input_widths = [] for port, text in self._input_items.items(): input_width = port.boundingRect().width() * 2 if text.isVisible(): input_width += text.boundingRect().width() input_widths.append(input_width) port_names_width += max(input_widths) port = list(self._input_items.keys())[0] port_height = port.boundingRect().height() * 2 if self._output_items: output_widths = [] for port, text in self._output_items.items(): output_width = port.boundingRect().width() * 2 if text.isVisible(): output_width += text.boundingRect().width() output_widths.append(output_width) port_names_width += max(output_widths) port = list(self._output_items.keys())[0] port_height = port.boundingRect().height() * 2 height = port_height * (max([len(self.inputs), len(self.outputs)]) + 2) height += 10 width = max(port_names_width, title_width) return width, height
9d6b8a37ef13a9698d6523baeacc2da0686c9c4d
703,017
def get_fans(tp):
    """ Get fan_in and fan_out with corresponding slices """
    slices_fan_in = {}  # fan_in per slice
    slices_fan_out = {}
    for weight, instr in zip(tp.weight_views(), tp.instructions):
        slice_idx = instr[2]
        mul_1, mul_2, mul_out = weight.shape
        fan_in = mul_1 * mul_2
        fan_out = mul_out
        slices_fan_in[slice_idx] = (slices_fan_in[slice_idx] + fan_in
                                    if slice_idx in slices_fan_in.keys() else fan_in)
        slices_fan_out[slice_idx] = fan_out
    return slices_fan_in, slices_fan_out
7fdff84c5129bd22a738653b6676d48c2a5f073d
703,018
def secret_token():
    """ Fixture that yields a usable secret token. """
    return 'super-secret-token-string'.encode()
90e6c54a18387c64e27fea93912278c126df1585
703,019
import types


def is_variable(tup):
    """ Takes (name, object) tuple, returns True if it is a variable. """
    name, item = tup
    # callable() checks whether an object can be called. If it returns True the call
    # may still fail, but if it returns False calling the object will never succeed.
    # It returns True for functions, methods, lambdas, classes, and instances of
    # classes that implement __call__.
    if callable(item):
        # function or class
        return False
    if isinstance(item, types.ModuleType):
        # imported module
        return False
    if name.startswith("_"):
        # private property
        return False
    return True
81055d1ed252160c417b386c875e818b87780f14
703,020
import os
import errno


def _IsOnDevice(path, st_dev):
    """Checks if a given path belongs to a FS on a given device.

    Args:
        path: a filesystem path, possibly to a non-existent file or directory.
        st_dev: the ID of a device with a filesystem, as in os.stat(...).st_dev.

    Returns:
        True if the path or (if the path does not exist) its closest existing
        ancestor exists on the device. False if not.
    """
    if not os.path.isabs(path):
        path = os.path.abspath(path)
    try:
        return os.stat(path).st_dev == st_dev
    except OSError as err:
        if err.errno == errno.ENOENT:
            dirname = os.path.dirname(path)
            if len(dirname) < len(path):
                return _IsOnDevice(dirname, st_dev)
        return False
391843553ea49ae7c0998dac5601d5d525890265
703,021
def mps_to_kmph(mps):
    """ Transform a value from meters-per-second to kilometers-per-hour """
    return mps * 3.6
fee133def1727801e5e473d3ffb2df6c7e733a04
703,022
import inspect


def behavior(instance_mode="session", instance_creator=None):
    """ Decorator to specify the server behavior of your Pyro class. """
    def _behavior(clazz):
        if not inspect.isclass(clazz):
            raise TypeError("behavior decorator can only be used on a class")
        if instance_mode not in ("single", "session", "percall"):
            raise ValueError("invalid instance mode: " + instance_mode)
        if instance_creator and not callable(instance_creator):
            raise TypeError("instance_creator must be a callable")
        clazz._pyroInstancing = (instance_mode, instance_creator)
        return clazz
    if not isinstance(instance_mode, str):
        raise SyntaxError("behavior decorator is missing argument(s)")
    return _behavior
748817411f58cdbce66b2cacdaf0a642183c7963
703,023
import torch def gt2out(gt_bboxes_list, gt_labels_list, inp_shapes_list, stride, categories): """transform ground truth into output format""" batch_size = len(gt_bboxes_list) inp_shapes = gt_bboxes_list[0].new_tensor(inp_shapes_list, dtype=torch.int) output_size = inp_shapes[0] / stride height_ratio, width_ratio = output_size.float() / inp_shapes[0].float() # allocating memory tl_heatmaps = -2 * gt_bboxes_list[0].new_ones(batch_size, categories, output_size[0], output_size[1]) br_heatmaps = -2 * gt_bboxes_list[0].new_ones(batch_size, categories, output_size[0], output_size[1]) ct_heatmaps = -2 * gt_bboxes_list[0].new_ones(batch_size, categories, output_size[0], output_size[1]) tl_regrs = gt_bboxes_list[0].new_zeros(batch_size, 2, output_size[0], output_size[1]) br_regrs = gt_bboxes_list[0].new_zeros(batch_size, 2, output_size[0], output_size[1]) ct_regrs = gt_bboxes_list[0].new_zeros(batch_size, 2, output_size[0], output_size[1]) tl_emds = gt_labels_list[0].new_zeros(batch_size, 1, output_size[0], output_size[1]) br_emds = gt_labels_list[0].new_zeros(batch_size, 1, output_size[0], output_size[1]) for b_ind in range(batch_size): # loop through batch-images for obj_ind, detection in enumerate(gt_bboxes_list[b_ind]): # loop through objects in one image category = gt_labels_list[b_ind][obj_ind] - 1 xtl, ytl = detection[0], detection[1] xbr, ybr = detection[2], detection[3] xct, yct = (detection[2] + detection[0]) / 2., (detection[3] + detection[1]) / 2. fxtl = (xtl * width_ratio) fytl = (ytl * height_ratio) fxbr = (xbr * width_ratio) fybr = (ybr * height_ratio) fxct = (xct * width_ratio) fyct = (yct * height_ratio) xtl = int(fxtl) ytl = int(fytl) xbr = int(fxbr) ybr = int(fybr) xct = int(fxct) yct = int(fyct) # heatmaps tl_heatmaps[b_ind, category, ytl, xtl] = 1 br_heatmaps[b_ind, category, ybr, xbr] = 1 ct_heatmaps[b_ind, category, yct, xct] = 1 # offsets tl_regrs[b_ind, 0, ytl, xtl] = fxtl - xtl # tl_tx tl_regrs[b_ind, 1, ytl, xtl] = fytl - ytl # tl_ty br_regrs[b_ind, 0, ybr, xbr] = fxbr - xbr # br_tx br_regrs[b_ind, 1, ybr, xbr] = fybr - ybr # br_ty ct_regrs[b_ind, 0, yct, xct] = fxct - xct # ct_tx ct_regrs[b_ind, 1, yct, xct] = fyct - yct # ct_ty # embeddings tl_emds[b_ind, 0, ytl, xtl] = 2 br_emds[b_ind, 0, ybr, xbr] = 2 tl_out=(tl_heatmaps, tl_emds, tl_regrs) br_out=(br_heatmaps, br_emds, br_regrs) ct_out=(ct_heatmaps, None, ct_regrs) return tl_out, br_out, ct_out
da3636776f75abc53cc790e0ffd6871fc6a1d7ca
703,024
def create_list_id_title(sheets: list) -> list: """ Args: this function gets a list of all the sheets of a spreadsheet a sheet is represented as a dict format with the following fields "sheets" : [ { "properties": { "sheetId": 0, "title": "Sheet1", "index": 0, "sheetType": "GRID", "gridProperties": { "rowCount": 1000, "columnCount": 26 } } }, ... ] Returns : the output of the function will be a list of dict in the format [{'sheetId' : 123 , 'sheet title': 'title'},...] """ result = [] for sheet in sheets: sheetId = sheet.get('properties').get('sheetId') sheet_title = sheet.get('properties').get('title') result.append({'SheetId': sheetId, 'Sheet title': sheet_title}) return result
a32d2cbfce6f06d326f49e69983e05e67bfc1697
703,025
from pathlib import Path
import argparse


def dir_abs_path(path_str: str) -> Path:
    """
    Validate directory `path_str` and make it absolute.

    Arguments:
        path_str -- A path to check.

    Returns:
        An absolute path.

    Raises:
        argparse.ArgumentTypeError -- If path is not a directory.
    """
    path = Path(path_str).absolute()
    if path.exists() and not path.is_dir():
        raise argparse.ArgumentTypeError(f"Path {path.as_posix()} is not a directory")
    return path
8fc8a3c5fbb7555bf6b2e6e76d40b8c32aa4e001
703,026
def parse_glyphs_groups(names, groups): """ Parse a ``gstring`` and a groups dict into a list of glyph names. """ glyph_names = [] for name in names: # group names if name[0] == '@': group_name = name[1:] if group_name in groups: glyph_names += groups[group_name] else: print('project does not have a group called %s.\n' % group_name) # glyph names else: glyph_names.append(name) return glyph_names
50e79acffdc6d26576e8524b52219afad3e40a4e
703,027
import argparse def set_parser(): """ set custom parser """ parser = argparse.ArgumentParser(description="") parser.add_argument("-r", "--regions", nargs='+', required=False, default=['R1', 'R2', 'R3'], help="The regions to train on (default is R1 R2 R3)") parser.add_argument("-m", "--weights_dir", type=str, required=False, default='weights', help="Location to save weights") parser.add_argument("-b", "--batch_size", type=int, required=False, default=12, help="The batch size") parser.add_argument("-w", "--num_workers", type=int, required=False, default=3, help="Number of workers for dataloader") parser.add_argument("-g", "--gpu", type=str, required=False, default='cuda', help="The device 'cuda' for gpu or 'cpu' for cpu") return parser
6f60e197401f6170f1edfb43e88777a59785540d
703,028
def event_fixture(): """Return a received event from the websocket client.""" return { "type": "event", "event": { "source": "node", "event": "value updated", "nodeId": 52, "args": { "commandClassName": "Basic", "commandClass": 32, "endpoint": 0, "property": "currentValue", "newValue": 255, "prevValue": 255, "propertyName": "currentValue", }, }, }
4866641f285ca65003c3dada9c2f406ae2f5d218
703,029
import tempfile import os import requests import tarfile import json import subprocess def run_dcos_engine(dcos_engine_url: str, dcos_engine_template): """ Runs the dcos-engine """ tmpdir = tempfile.mkdtemp() # pull down dcos engine in temp dir download_path = os.path.join(tmpdir, 'download.tar.gz') with open(download_path, 'wb') as f: r = requests.get(dcos_engine_url) for chunk in r.iter_content(1024): f.write(chunk) extract_path = os.path.join(tmpdir, 'extract') with tarfile.open(download_path) as tar: tar.extractall(path=extract_path) extracted_name = dcos_engine_url.split('/')[-1].rstrip('.tar.gz') dcos_engine_bin_path = os.path.join(extract_path, extracted_name, 'dcos-engine') # inject parameters into the JSON (keyhelper, agent definitions) acs_template_path = os.path.join(tmpdir, 'acs_template.json') with open(acs_template_path, 'w') as f: json.dump(dcos_engine_template, f) # run acs vs template cmd = [dcos_engine_bin_path, 'generate', acs_template_path] subprocess.check_call(cmd, cwd=tmpdir) cluster_name = dcos_engine_template['properties']['masterProfile']['dnsPrefix'] with open(os.path.join(tmpdir, '_output/{}/azuredeploy.json'.format(cluster_name)), 'r') as f: arm_template = json.load(f) with open(os.path.join(tmpdir, '_output/{}/azuredeploy.parameters.json'.format(cluster_name)), 'r') as f: arm_template_parameters_raw = json.load(f) arm_template_parameters = dict() for k, v in arm_template_parameters_raw['parameters'].items(): arm_template_parameters[k] = v['value'] return arm_template, arm_template_parameters
755538defc135e9ce3c9af32215cfc9bbabde04e
703,030
def str_id(qualified_name):
    """Return PROVN representation of a URI qualified name.

    Params
    ------
    qualified_name : QualifiedName
        Qualified name for which to return the PROVN string representation.
    """
    return qualified_name.provn_representation().replace("'", "")
ff1cf6614a098818e8a70de105d7bfb7810548dc
703,031
def perc_range(n, min_val, max_val, rounding=2):
    """ Return percentage of `n` within `min_val` to `max_val` range. The ``rounding``
    argument is used to specify the number of decimal places to include after the
    floating point.

    Example::

        >>> perc_range(40, 20, 60)
        50
    """
    return round(
        min([1, max([0, n - min_val]) / (max_val - min_val)]) * 100, rounding)
379515f6c0483b4bfed93d0c1012bb2ca111e410
703,032
def sort_sequence_by_key(sequence, key_name, reverse=False): """ often when setting up initial serializations (especially during testing), I pass a list of dictionaries representing a QS to some fn. That list may or may not be sorted according to the underlying model's "order" attribute This fn sorts the list according to the value of "key" in each list item; typically, "key" would match the "order" attribute of the model :param key_name: name of key to sort by :param list: list to sort :return: """ def _sorting_fn(item): # using this fn ensures that 'sort_sequence_by_key' will work # for a list of dictionaries or a list of objects # (the latter is a special use-case; a QS can use the '.order_by' filter, but an actual list of models cannot) try: return item.get(key_name) except AttributeError: return getattr(item, key_name) sorted_sequence = sorted( sequence, key=lambda item: _sorting_fn(item), reverse=reverse, ) return sorted_sequence
fbe46c942ac35d5399450c6bba430a096e6b7503
703,033
def width_from_bitdefs(bitdefs):
    """
    Determine how wide a binary value needs to be based on bitdefs used to define it.

    Args:
        bitdefs (list(BitDef)): List of bitdefs to find max width of
    Returns:
        (int): Maximum width
    """
    max_index = max([bitdef.end for bitdef in bitdefs])
    width = max_index + 1
    return width
59503f335d6d427579be730806c738108091e9ed
703,034
import subprocess


def build_docs():
    """Build docs for package """
    subprocess.check_call(['scons', '-f', 'src/SConstruct'])
    return ['doc/spowtd.1', 'doc/user_guide.pdf']
03415279da466d496f12cc674bce2ee0c0ec73ed
703,035
def get_vectorized_series(text_series, vectorizer):
    """
    Vectorize the input series using a pre-trained vectorizer object.
    :param text_series: pandas Series of text
    :param vectorizer: pre-trained sklearn vectorizer object
    :return: array of vectorized features
    """
    vectors = vectorizer.transform(text_series)
    vectorized_series = [vectors[i] for i in range(vectors.shape[0])]
    return vectorized_series
31c9d550d60443da41277833c2f79be66238951a
703,036
from typing import Sequence import difflib def _validate_magics_with_black(before: Sequence[str], after: Sequence[str]) -> bool: """ Validate the state of the notebook before and after running nbqa with black. Parameters ---------- before Notebook contents before running nbqa with black after Notebook contents after running nbqa with black Returns ------- bool True if validation succeeded else False """ diff = difflib.unified_diff(before, after) result = "".join(i for i in diff if any([i.startswith("+ "), i.startswith("- ")])) expected = ( '- "def compute(operand1,operand2, bin_op):\\n",\n' '+ "def compute(operand1, operand2, bin_op):\\n",\n' '- "compute(5,1, operator.add)"\n' '+ "compute(5, 1, operator.add)"\n' '- " ?str.splitlines"\n' '+ "str.splitlines?"\n' '- " %time randint(5,10)"\n' '+ "%time randint(5,10)"\n' '- "result = str.split??"\n' '+ "str.split??"\n' ) return result == expected
e6e655f2e6ea5e8d055f27e8da14cf0233c0c202
703,037
import requests import json import random def get_gelImage(tags): """Returns pictures from Gelbooru with given tags.""" tags = list(tags) formatted_tags = "" rating = "" ratings = { "re": "rating%3aexplicit", "rq": "rating%3aquestionable", "rs": "rating%3asafe" } if tags: # if there are any tags, check for ratings if tags[0] in ratings: rating = ratings[tags[0]] tags.remove(tags[0]) if rating == "": # if rating wasn't specified, set safe one rating = ratings["rs"] # make tags suitable for Gelbooru API url formatted_tags = "_".join(tags).replace("/", "+") print(rating, formatted_tags) api_url = f"https://gelbooru.com/index.php?page=dapi&s=post&q=index&json=1&limit=50&tags={rating}+{formatted_tags}" response = requests.get(api_url) # parsing json json_api_url = json.loads(response.text) # verify if there is anything within given tags if json_api_url: image = random.choice(json_api_url)["file_url"] return image else: return "No results with given tags or they are incorrect."
1ad643483d96ab53dd217b14dae6cfa5febc3d44
703,038
def deriv_MCP(w, alpha=1., g=3):
    """Derivative of the MCP, w > 0"""
    return (w < alpha * g) * (alpha - w / g)
0982f3354650d0cc5a9cbc14dbce9b007f56239b
703,039
import re


def parse_stdout_data(stdout):
    """Parse the process output.

    Args:
        stdout ([byte]): standard output

    Returns:
        [list]: the parsed data
    """
    if stdout is None:  # nothing to parse
        return
    # Newline conventions differ across platforms, so split on any of them (and on '=')
    pattern = r'[\r\n|\r|\n|=]'
    res = re.split(pattern, stdout.decode('UTF-8').strip())  # split the string on newlines
    for i, el in enumerate(res):
        if el.find(":") != -1:
            res[i] = re.split(r'[:]', el)[-1]
    return [el.strip() for el in res]
17f2c191c792ef473ff1c89829488ed1f119db29
703,040
def split(array, nrows, ncols):
    """Split a matrix into sub-matrices."""
    return array.reshape(array.shape[1]//nrows, nrows, -1, ncols).swapaxes(1, 2)
a153ee015cd03b2cbc89d7e434a67bef3740237f
703,041
def row_to_dict():
    """Convert pyspark.Row to dict for easier unordered comparison"""
    def func(row, recursive=True):
        return row.asDict(recursive=recursive)
    return func
ebc2395354d07a11895e7c85f0813279fea4630a
703,042
def get_port():
    """Returns a port number."""
    return 0
97259c825bbe41f47bd5bec81fedf24c5b19a547
703,044
def sort_unique(edges):
    """Make sure there are no duplicate edges and that for each ``coo_a < coo_b``. """
    return tuple(sorted(
        tuple(sorted(edge)) for edge in set(map(frozenset, edges))
    ))
d4f4425b78baed6d822d5559787ced1653986761
703,045
def get_var_names(var_name):
    """Defines replacement dictionary for the bare variable name and the names derived
    from it - the optimization flag and the identifier name.
    """
    repl = dict()
    repl['opt_var_name'] = "Opt_%s" % var_name
    repl['id_var_name'] = "ID_%s" % var_name
    repl['var_name'] = var_name
    return repl
37999ffed0a0df1dbf736ada0cc355080dd9997f
703,046
import sys def progressbar(it, prefix="", size=60, file=sys.stdout): """A super simple progressbar. Args: it ([type]): [description] prefix (str, optional): [description]. Defaults to "". size (int, optional): [description]. Defaults to 60. file ([type], optional): [description]. Defaults to sys.stdout. """ def get_n(j): return int(size * j / count) def show(x, j): file.write(f"{prefix} [{'#' * x}{'.' * (size-x)}] {j:0{n_chars}d}/{count}\r") file.flush() count = len(it) cur_n = 0 n_chars = len(str(count)) show(0, 0) for i, item in enumerate(it): yield item x = get_n(i + 1) if x != cur_n: show(x, i + 1) cur_n = x file.write("\n") file.flush()
cce6beee0d67e0d6be954f52709d0e9aa36c9a07
703,047
def function_grandkids_cell(): """Returns string see usage""" return "\n".join(["F1 is a function", "F1a is a function", "F1a1 is a function", "F1 is composed of F1a", "F1a is composed of F1a1", "a is a data", "F1a produces a", "b is a data", "F1a consumes b", "c is a data", "F1a1 produces c", "d is a data", "F1a1 consumes d", ""])
07b862cd5c5e02a90cba744d7182dab25994be1b
703,048
from typing import List from typing import Dict import re def parse_header_links(value: str) -> List[Dict[str, str]]: """ Returns a list of parsed link headers, for more info see: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Link The generic syntax of those is: :: Link: < uri-reference >; param1=value1; param2="value2" So for instance: Link; '<http:/.../front.jpeg>; type="image/jpeg",<http://.../back.jpeg>;' would return :: [ {"url": "http:/.../front.jpeg", "type": "image/jpeg"}, {"url": "http://.../back.jpeg"}, ] .. note:: Stolen code from httpx _utils.py (private method) :param value: HTTP Link entity-header field :return: list of parsed link headers """ links: List[Dict[str, str]] = [] replace_chars = " '\"" value = value.strip(replace_chars) if not value: return links for val in re.split(", *<", value): try: url, params = val.split(";", 1) except ValueError: url, params = val, "" link = {"url": url.strip("<> '\"")} for param in params.split(";"): try: key, value = param.split("=") except ValueError: break link[key.strip(replace_chars)] = value.strip(replace_chars) links.append(link) return links
536d3f2b477666c076ac29312f3acfe63f40e324
703,049
def parseFloat(value, ret=0.0): """ Parses a value as float. This function works similar to its JavaScript-pendant, and performs checks to parse most of a string value as float. :param value: The value that should be parsed as float. :param ret: The default return value if no integer could be parsed. :return: Either the parse value as float, or ret if parsing not possible. """ if value is None: return ret if not isinstance(value, str): value = str(value) conv = "" value = value.strip() dot = False for ch in value: if ch not in "+-0123456789.": break if ch == ".": if dot: break dot = True conv += ch try: return float(conv) except ValueError: return ret
6828cc19882dcbfcf7b83781b1e79a6014034cad
703,050
def discard_inserted_documents(error_documents, original_documents): """Discard any documents that have already been inserted which are violating index constraints such documents will have an error code of 11000 for a DuplicateKey error from https://github.com/mongodb/mongo/blob/master/src/mongo/base/error_codes.yml#L467 Parameters: error_documents (List[Dict]): list of documents that failed to insert in original transaction original_documents (List[Dict]): list of documents from original transaction that failed error_code (Int): error status code to filter on Returns: List[Dict]: list of documents with matching error code entries removed """ # doc['op'] returns the actual document from the previous transaction errors = list(doc['op'] for doc in error_documents if doc['code'] == 11000) return list(doc for doc in original_documents if doc not in errors)
9d7d47a0ade2300449a7f1a4a20c3a70f6dce583
703,051
def list_folder(drive, folder_id):
    """
    Lists contents of a GoogleDriveFile that is a folder

    :param drive: Drive object to use for getting folders
    :param folder_id: The id of the GoogleDriveFile
    :return: The GoogleDriveList of folders
    """
    _q = {'q': "'{}' in parents and trashed=false".format(folder_id)}
    return drive.ListFile(_q).GetList()
1ea5837d6096f2f9c1f0485d5a02bd43a8b8c55e
703,052
import re


def normalize_place_name(value):
    """Quick and dirty conversion of common verbiage around placenames"""
    query_value = value
    query_value = re.sub(r"^Place \w+:", "", query_value)
    query_value = re.sub(r"^Cultural Place:", "", query_value)
    query_value = query_value.strip()
    return query_value
790110ff9381cdce69caffc14362f61936c36cb4
703,053
import struct


def htons(cpu_context, func_name, func_args):
    """
    Convert the provided 16-bit number in host byte order (little-endian) to
    network byte order (big-endian)
    """
    le_port = func_args[0]
    port_data = struct.pack("<H", le_port)
    return struct.unpack(">H", port_data)[0]
095be37630fbe0dc86ea5e731180249c29ccac85
703,054
def read_well_list(stream):
    """Read the well-list."""
    retval = {}
    for line in stream:
        line = line.rstrip()
        if line.startswith("Row"):
            continue
        tokens = line.split("\t")
        wbc = tokens[5]
        retval[wbc] = {
            "row": tokens[0],
            "column": tokens[1],
            "sample": tokens[4].replace(" ", "_")}
    return retval
65b1782a76c5eddb88a6e6491a72fd2f93d56076
703,055
import json from datetime import datetime def filter_data(data): """ Setup the filter in use by the listen() function. It will take the rsvp event JSON string, and return a JSON string subset of that data. :param: json string :return: json string """ try: result = json.dumps({ 'CITY': data['group']['group_city'], 'LAT' : data['group']['group_lat'], 'LON' : data['group']['group_lon'], 'DATE': datetime.fromtimestamp(int(str(data['event']['time'])[:-3])).strftime('%Y%m%d'), 'EID' : data['event']['event_id'], 'GID' : data['group']['group_id'], 'MID' : data['member']['member_id']}) print(result) return result except KeyError as e: print("ERR: {}".format(e)) except Exception as e: print("ERR: {}".format(e))
f32c6f7907fd6dd64fac443c459864cd62bb737f
703,056
def ordinal_number(n: int):
    """
    Returns a string representation of the ordinal number for `n`

    e.g.,
    >>> ordinal_number(1)
    '1st'
    >>> ordinal_number(4)
    '4th'
    >>> ordinal_number(21)
    '21st'
    """
    # from https://codegolf.stackexchange.com/questions/4707/outputting-ordinal-numbers-1st-2nd-3rd#answer-4712
    return '%d%s' % (n, 'tsnrhtdd'[(n // 10 % 10 != 1) * (n % 10 < 4) * (n % 10)::4])
9cb2b333cfe7d4e7b115d21d9d3c1bbaec02cdd9
703,057
def mode_batch_size(mode, hparams):
    """Returns the batch size for a given mode (train or eval).

    Args:
        mode: Either 'train' or 'eval'.
        hparams: Hyperparameters.

    Returns:
        Integer batch size.

    Raises:
        ValueError: If mode is not 'train' or 'eval'.
    """
    if mode == 'train':
        return hparams.batch_size
    elif mode == 'eval':
        return hparams.eval_batch_size
    else:
        raise ValueError('Invalid --mode=%r' % mode)
42f45e54698a539b27ad764b0c6584fb3620990d
703,059
import os


def path_with_ext(dialog, wildcards):
    """Append extension if necessary."""
    root, ext = os.path.splitext(dialog.GetPath())
    if not ext:
        ext = wildcards[dialog.GetFilterIndex()][1]  # use first extension
        ext = ext[1:]  # remove leading '*'
        if ext == '.*':
            return root
    return root + ext
e39fb990fcb8dbd4711d82ea217752d9007f9ef6
703,060
from typing import Optional
import ast


def _parse_type_comment(
    type_comment: Optional[str],
) -> Optional[ast.expr]:
    """
    Attempt to parse a type comment. If it is None or if it fails to parse, return None.
    """
    if type_comment is None:
        return None
    try:
        # pyre-ignore[16]: the ast module stubs do not have full details
        return ast.parse(type_comment, "<type_comment>", "eval").body
    except SyntaxError:
        return None
49aafd9df3d590ccebf1c4b20b0c2a04640a9797
703,061
def parse_dependencies(dataset, use_lemmas=False): """ Recovers Head word, head word TAG and dependencies. """ new_dataset = [] for entry in dataset: if use_lemmas: to_use = "lemmas" else: to_use = "tokens" mapped_head = [entry["features"][to_use][i] for i in entry["features"]["dependency_ref"]] mapped_head_tag = [entry["features"]["tags"][i] for i in entry["features"]["dependency_ref"]] # In the case we're using lemmas, our tokens will be the lemmas... entry["features"]["tokens"] = entry["features"][to_use] entry["features"].pop("dependency_ref"), entry["features"].pop("lemmas") entry["dependency_map"] = [ dependency for dependency in entry["features"]["dependency_map"] if dependency in ["amod", "dep", "nsubj", "dobj"] ] entry["features"]["head"] = mapped_head entry["features"]["head_tag"] = mapped_head_tag new_dataset.append(entry) return new_dataset
0fef2af24cb8cc31a98b5a91b9009ee5975d6432
703,062
from typing import Any import json def try_parse(value: Any): """Attempts to turn a string value into it's Python data representation via json.loads. Has special handling for strings 'True', 'False', and 'None'. :param value: The value. If a string, attempts to parse as JSON, if not or on parse error returns the value itself. :returns: The value. """ if isinstance(value, str): try: return json.loads(value) except json.JSONDecodeError: if value == "True": return True if value == "False": return False if value == "None": return None return value try: return {key: try_parse(val) for key, val in value.items()} except AttributeError: return [try_parse(val) for val in value] except TypeError: return value
db963c34319c8aaee6ac17a52d24dbcf2e5806e5
703,063
def by_deep_time_per_call(stat):
    """Sorting by inclusive elapsed time per call in descending order."""
    return -stat.deep_time_per_call if stat.deep_hits else -stat.deep_time
6419a76a292828d5fd5525998409d175924eea5f
703,064
def is_release(semver):
    """is a semver a release version"""
    return not (semver.build or semver.prerelease)
de14f18e4df2bf7fc86a6af10d2ef07f93a8da6b
703,065
def store_film_params(gammas, betas, contrasts, metadata, model, film_layers, depth): """Store FiLM params. Args: gammas (dict): betas (dict): contrasts (list): list of the batch sample's contrasts (eg T2w, T1w) metadata (list): model (nn.Module): film_layers (list): depth (int): Returns: dict, dict: gammas, betas """ new_contrast = [metadata[0][k]['contrast'] for k in range(len(metadata[0]))] contrasts.append(new_contrast) # Fill the lists of gammas and betas for idx in [i for i, x in enumerate(film_layers) if x]: if idx < depth: layer_cur = model.encoder.down_path[idx * 3 + 1] elif idx == depth: layer_cur = model.encoder.film_bottom elif idx == depth * 2 + 1: layer_cur = model.decoder.last_film else: layer_cur = model.decoder.up_path[(idx - depth - 1) * 2 + 1] gammas[idx + 1].append(layer_cur.gammas[:, :, 0, 0].cpu().numpy()) betas[idx + 1].append(layer_cur.betas[:, :, 0, 0].cpu().numpy()) return gammas, betas, contrasts
fcbc1e0cee76753a3edd8f4ca49af0354913c433
703,066
def identity(obj): """ Identity function computing no operation Parameters ---------- obj : object any object Returns ------- obj the input object itself """ return obj
252a44ce3251b74ad25e28bf6bff462f6227f04b
703,067
def get_acq_final_days(df, criteria, correct_amount, session_length): """ This function returns the last days for the Acquisition test. The function grabs all the rows that meet the minimum correct trials amount and the maximum session length amount. Then it calculates the first instance when the animal meets the n days in a row criteria. Then it will return a dataframe that contains all the first instances of the animals that met the criteria of n days in a row with at least the minimum correct amount and finished the test in less than or equal to the maximum session length. :param df: A dataframe that represents the cleaned Acquisition data. :param criteria: The n days in the row that the animal needs to complete with the other criteria :param correct_amount: The minimum required correct trials amount that animal needs to achieve :param session_length: The maximum session length given to the animal to achieve the other criteria :return: df_copy: A dataframe with all the first instances of the animals that met the criteria. """ df['Day'] = df.groupby('ID').cumcount() + 1 df_copy = df.copy(deep=True) df_copy.sort_values(['ID', 'Day'], ascending=[1, 1], inplace=True) df_copy = df_copy.loc[(df_copy['Corrects'] >= correct_amount) & (df_copy['SessionLength'] <= session_length)] df_copy.replace(0, 1, inplace=True) df_copy.sort_values(['ID', 'Day'], inplace=True) df_copy.reset_index(drop=True, inplace=True) row_index = 0 while row_index < df_copy.shape[0] - (criteria - 1): rows_to_sum = list() # compare x amount of rows in a row for sum_numbers in range(criteria): rows_to_add = df_copy.loc[row_index + sum_numbers] while rows_to_add['ID'] != df_copy.at[row_index, 'ID'] and row_index < df_copy.shape[0] - 1: row_index += 1 rows_to_sum.append(rows_to_add) last_row_info = rows_to_sum[-1] if len(rows_to_sum) < criteria: continue if last_row_info['ID'] != rows_to_sum[0]['ID']: continue day_counter = list() for row in rows_to_sum: day_counter.append(row['Day']) # if the days are consecutive, it passes the criteria if day_counter == sorted(range(day_counter[0], day_counter[-1] + 1)): df_copy.at[last_row_info.name, 'Criteria Passed?'] = 'yes' row_index += 1 # only take the first occurrence of the rows that passed the criteria df_copy = df_copy.loc[df_copy['Criteria Passed?'] == 'yes'] df_copy['Mice ID'] = df_copy['ID'] df_copy = df_copy.groupby('Mice ID').first() return df_copy
2e9974fed030e50fdb400c95a7e353e85ad7d89f
703,068
def res_pairs(num_res, nearn):
    """
    computes res-res pairs included for which to calculate minimum distance features

    state num of residues, and nearest neighbour skip e.g. i+3 is nearn=3
    """
    res = []
    for i in range(num_res - nearn):
        for j in range(i + nearn, num_res):
            res.append([i + 1, j + 1])
    return res
909d1e5142e909eef009241548ae92012c17e6f3
703,069
def extract(key, items):
    """Return the sorted values from dicts using the given key.

    :param key: Dictionary key
    :type key: str | unicode
    :param items: Items to filter.
    :type items: [dict]
    :return: Sorted list of values.
    :rtype: [str | unicode]
    """
    return sorted(item[key] for item in items)
d39b6d42a08ea8e2e7d7488fb00a28243c9a6718
703,070
def page_not_found(e):
    """whatever you are looking for we don't have. Sorry"""
    return 'no, it is not here', 404
6013c85c5dec5a22d58b8820c8333859e1ceaa08
703,071
def format_kvps(mapping, prefix=""): """Formats a mapping as key=value pairs. Values may be strings, numbers, or nested mappings. Nested mappings, e.g. host:{ip:'0.0.0.1',name:'the.dude.abides'}, will be handled by prefixing keys in the sub-mapping with the key, e.g.: host.ip=0.0.0.1 host.name=the.dude.abides. """ kvp_list = [] for k, v in mapping.items(): if hasattr(v, "keys"): # nested mapping new_prefix = prefix + "." + k if prefix else k kvps = format_kvps(v, prefix=new_prefix) # format as string kvp_list.append(kvps) continue # already prefixed with key; go to next if v is None: v = "None" elif isinstance(v, int) or isinstance(v, float): v = "{}".format(v) elif " " in v: v = '"' + v.replace('"', '\\"') + '"' if prefix: k = prefix + "." + k kvp_list.append("{}={}".format(k, v)) return " ".join(kvp_list)
4780d5c705a8805331a1d981e87fa3d3dca263a8
703,072
def load_input(filename):
    """
    :param filename: A string representing a text.
    :return: A set of words representing the text.
    """
    # Replace punctuation with spaces before splitting into words.
    text = filename.lower()
    for ch in '?!.-:;,()[]"\'':
        text = text.replace(ch, ' ')
    words = set(text.split())
    return words
aa291c1fdf625f6380a607263562c3c47a667121
703,073
import torch def project(meta_weights, P, Q): """ project meta_weights to sub_weights Args: meta_weights: a 4-D tensor [cout, cin, k, k], the meta weights for one-shot model; P: a 2-D tensor [cout, cout_p], projection matrix along cout; Q: a 2-D tensor [cin, cin_p], projection matrix along cin; Return: proj_weights: a 4-D tensor [cout_p, cin_p, k, k], the projected weights; """ if meta_weights.ndimension() != 4: raise ValueError("shape error! meta_weights should be 4-D tensors") elif meta_weights.shape[0] != P.shape[0] or meta_weights.shape[1] != Q.shape[0]: raise ValueError("shape mismatch! The projection axises of meta weights, P and Q should be consistent.") proj_weights = torch.einsum('ijhw,ix,jy->xyhw', meta_weights, P, Q) return proj_weights
a693d51a4a74358bd831954c46bb8cb993dabf66
703,074
def lj_sanity_test():
    """Sanity test to make sure we even have the plugin fixtures installed."""
    return "sanity"
ddd4fee2ad3b8b3f81c3a2821ce5708d22d05d89
703,075
def _is_not_blank(line):
    """Return true if `line` is not blank."""
    return len(line.split()) > 0
835d991d71dcb59075b6ae2b317c8fb8c51abee9
703,076
def overlaps(box1, box2): """ Checks whether two boxes have any overlap. Args: box1: (float, float, float, float) Box coordinates as (x0, y0, x1, y1). box2: (float, float, float, float) Box coordinates as (x0, y0, x1, y1). Returns: bool True if there is any overlap between given boxes. """ if not ((box1[0] <= box2[0] <= box1[2]) or (box1[0] <= box2[2] <= box1[2]) or (box2[0] <= box1[0] and box2[2] >= box1[2])): return False if not ((box1[1] <= box2[1] <= box1[3]) or (box1[1] <= box2[3] <= box1[3]) or (box2[1] <= box1[1] and box2[3] >= box1[3])): return False return True
b8bf96e87f45f24d337b503184670d0f56d209e0
703,077
import binascii


def hashInt(bytebuffer):
    """
    Map a long hash string to an int smaller than power(2, 31)-1
    """
    hex_dig = binascii.hexlify(bytebuffer)
    return int(hex_dig, 16) % 2147483647
d89a20651175a023e844e5c14dd860fcd8f186ee
703,078
import os def write_files(directory, entries, organize): """Write a list of file entries to a directory.""" # (filename, data) bad_entries = [] # Get absolute path, just in case working directory changes. directory = os.path.abspath(directory) # Make top-level. os.makedirs(directory, exist_ok=True) # Prepare extensions for translation into directories. extensions = set() if organize: # Collect extensions. for entry in entries: extensions.add(os.path.splitext(entry[0])[1][1:]) # Create subdirectories. for extension in set(extensions): try: os.makedirs(os.path.join(directory, extension), exist_ok=True) except OSError: # Remove bad extensions. extensions.remove(extension) print('Could not make directory "{extension}. \ Files with this extension will be in the top-level directory \ if possible.'.format(extension=extension)) # Write files. for entry in entries: file_extension = os.path.splitext(entry[0])[1][1:] if file_extension in extensions: file_name = os.path.join(directory, file_extension, entry[0]) else: file_name = os.path.join(directory, entry[0]) try: with open(file_name, 'wb') as open_file: open_file.write(entry[1]) except OSError: bad_entries.append(entry) print('Bad filename "' + file_name + '". File not written.') return bad_entries
572832c4ea4efcbd1bcb1365a492716068e2a48d
703,079
def to_sendeable_block_original_bus(array_of_msgs): """Given an array of msgs return it in the format panda.can_send_many expects. Input looks like: [(512, bytearray(b'>\x80\x1c'), 0), ...] Output looks like: [(512, '>\x80\x1c', 0), ...] """ new_arr = [] for msg in array_of_msgs: new_arr.append((msg[0], msg[1], str(msg[2]), msg[3])) return new_arr
24fd1cdf8c1bfed095d1685a8eaa246c89425958
703,080
def extractColorRamp(color_ramp): """Make a curve from color ramp data""" # for uniformity looks like a glTF animation sampler curve = { 'input' : [], 'output' : [], 'interpolation' : ('STEP' if color_ramp.interpolation == 'CONSTANT' else 'LINEAR') } for e in color_ramp.elements: curve['input'].append(e.position) for i in range(4): curve['output'].append(e.color[i]) return curve
7fb71e94c1d687065c5c015524b6739588fa047a
703,081
def create_distance_callback(data, manager): """Creates callback to return distance between points.""" distances_ = {} index_manager_ = manager # precompute distance between location to have distance callback in O(1) for from_counter, from_node in enumerate(data['locations']): distances_[from_counter] = {} for to_counter, to_node in enumerate(data['locations']): if from_counter == to_counter: distances_[from_counter][to_counter] = 0 else: distances_[from_counter][to_counter] = ( abs(from_node[0] - to_node[0]) + abs(from_node[1] - to_node[1])) def distance_callback(from_index, to_index): """Returns the manhattan distance between the two nodes.""" # Convert from routing variable Index to distance matrix NodeIndex. from_node = index_manager_.IndexToNode(from_index) to_node = index_manager_.IndexToNode(to_index) return distances_[from_node][to_node] return distance_callback # [END distance_callback]
eff81290aa396fa07a6f45ce137258a12695dfa1
703,082
def max_sub_array(nums): """Given an integer array nums, find the contiguous subarray (containing at least one number) which has the largest sum and return its sum.""" max_sum = float("-inf")# unbounded lower value # https://stackoverflow.com/questions/34264710/what-is-the-point-of-floatinf-in-python # max_sum = sum(nums) # max_sum=nums[0] running_sum = 0 for num in nums: if num + running_sum > num: # accumulating the maximum value possible of the running sum running_sum = num + running_sum else: running_sum = num # resets the running sum to num>running_sum, i.e. if num>= running_sum+num if running_sum > max_sum: # a running sum can only replace the max_sum, if it is clearly greater than its value max_sum = running_sum return max_sum
cb7ea6a3eda54d7566430e38acee7e5999618af8
703,083
import yaml def _GetErrorDetailsSummary(error_info): """Returns a string summarizing `error_info`. Attempts to interpret error_info as an error JSON returned by the Apigee management API. If successful, the returned string will be an error message from that data structure - either its top-level error message, or a list of precondition violations. If `error_info` can't be parsed, or has no known error message, returns a YAML formatted copy of `error_info` instead. Args: error_info: a dictionary containing the error data structure returned by the Apigee Management API. """ try: if "details" in error_info: # Error response might have info on exactly what preconditions failed or # what about the arguments was invalid. violations = [] for item in error_info["details"]: # Include only those details whose format is known. detail_types = ( "type.googleapis.com/google.rpc.QuotaFailure", "type.googleapis.com/google.rpc.PreconditionFailure", "type.googleapis.com/edge.configstore.bundle.BadBundle", ) if item["@type"] in detail_types and "violations" in item: violations += item["violations"] descriptions = [violation["description"] for violation in violations] if descriptions: return error_info["message"] + "\n" + yaml.dump(descriptions) # Almost always seems to be included. return error_info["message"] except KeyError: # Format of the error details is not as expected. As a fallback, just give # the user the whole thing. return "\n" + yaml.dump(error_info)
276e8520bc5ca790fc61c71e0f43ebee0d78b597
703,084
def state_to_trip(trip_state, kp_state): """The function state_to_trip was added to have a uniform way of using the state between kinpy and TriP in the tests Args: state (dict): dictionary with the state name of each joint as it occurs in TriP as the key and a float as the value Returns: (dict): the input dictionary where the keys are adjusted to the form that kinpy states use """ # this now sets the trip states to be equal to the previously generated kinpy states state = { joint_name: kp_state["_".join(joint_name.split("_")[:-2])] for joint_name in trip_state.keys() } return state
ca6e5e756fcb862c3c704f8d9fbdf39899cbe6bb
703,085
def format_duration_hhmm(d): """ Utility function to format durations in the widget as hh:mm """ if d is None: return '' elif isinstance(d, str): return d hours = d.days * 24 + d.seconds // 3600 minutes = int(round((d.seconds % 3600) / 60)) return '{}:{:02}'.format(hours, minutes)
941670b1a16c87816589cfb05ff80b651857d337
703,086
def try_int(obj):
    """return int(obj), or original obj if failed"""
    try:
        return int(obj)
    except ValueError:  # invalid literal for int()
        return obj
21613ce19e86f5c5545e9dc4b161f6ddc95fd0ce
703,088
def gb_predict(X, model):
    """Apply a gradient boosting model using a standard function. It is a wrapper function."""
    y = model.predict(X)
    return y
755844678e0478d3678856c35c6e5d94027fe30b
703,089
def _anonymous_model_data(ops_data):
    """Returns a dict representing an anonymous model.

    ops_data must be a dict representing the model operations. It will be
    used unmodified for the model `operations` attribute.
    """
    return {"model": "", "operations": ops_data}
6b64f9098b30cf3e079311b75ca136cfc2f7038f
703,090
def encode_to_dict(encoded_str):
    """ Split a URL-encoded string into a dict

    >>> encode_to_dict('name=foo')
    {'name': 'foo'}
    >>> encode_to_dict('name=foo&val=bar')
    {'name': 'foo', 'val': 'bar'}
    """
    pair_list = encoded_str.split('&')
    d = {}
    for pair in pair_list:
        if pair:
            key = pair.split('=')[0]
            val = pair.split('=')[1]
            d[key] = val
    return d
a3af4d93d13404f01511483621e166c50d9e489e
703,091
def taxonomy_levels_below(taxa_level):
    """
    E.g. 'Order' --> ['Family', 'Genus']
    """
    p_levels = ['Kingdom', 'Phylum', 'Class', 'Order', 'Family', 'Genus']
    position_of_taxa_level = p_levels.index(taxa_level)
    return p_levels[position_of_taxa_level + 1:]
57e41b0042daa14b8ed09469006c6c3bdac320ec
703,092
def sort(nodes, total_order, dedup=False): """Sorts nodes according to order provided. Args: nodes: nodes to sort total_order: list of nodes in correct order dedup: if True, also discards duplicates in nodes Returns: Iterable of nodes in sorted order. """ total_order_idx = {} for i, node in enumerate(total_order): total_order_idx[node] = i if dedup: nodes = set(nodes) return sorted(nodes, key=lambda n: total_order_idx[n])
4cfb53593b3c862ab918507ea3d4fe94ab4899df
703,093
def bet_wit_cv(b_d, w_d):
    """
    Arguments: two dictionaries
    Returns: a tuple of doubles (between-class variance, within-class variance)
    """
    bc_var = b_d['n'] * w_d['n'] * ((b_d['ave'] - w_d['ave']) ** 2) / ((b_d['n'] + w_d['n']) ** 2)
    wc_var = (b_d['n'] * b_d['var'] + w_d['n'] * w_d['var']) / (b_d['n'] + w_d['n'])
    return bc_var, wc_var
a4b34fd4a4a713ab1f3350201a8c47d35e75e6eb
703,094
import subprocess def call(cmd, check=True, cwd=None): """Execute a command and return output and status code. If 'check' is True, an exception will be raised if the command exists with an error code. If 'cwd' is set, the command will execute in that directory.""" p = subprocess.Popen(cmd, stdout=subprocess.PIPE, cwd=cwd) stdout = p.communicate()[0] returncode = p.poll() if check and returncode != 0: raise Exception("Call failed with errorcode %s: %s" % (returncode, stdout)) return stdout, returncode
5ee896062dd1dd5ca0618327a133d4d03c4cb57c
703,095
def _test_against_patterns(patterns, entity_id):
    """Test entity against list of patterns, true if any match."""
    for pattern in patterns:
        if pattern.match(entity_id):
            return True
    return False
16881be06e86ae3fe1afc9dc8fb642573cf6fdcf
703,097
def get_schema_variables(schema): """ This method returns a set of all variables within the schemas.yml. The set is organised by: (variable_name, locator_method, script, file_name:sheet_name) If the variable is from an input database, the script is replaced by "-" Also, if the variable is not from a tree data shape (such as xlsx or xls), the 'file_name:sheet_name' becomes 'file_name' only. The sheet_name is important to consider as a primary key for each variable can only be made through combining the 'file_name:sheet_name' and 'variable_name'. Along with the locator_method, the set should contain all information necessary for most tasks. """ schema_variables = set() for locator_method in schema: # if there is no script mapped to 'created_by', it must be an input_file # replace non-existent script with the name of the file without the extension if not schema[locator_method]['created_by']: script = "-" else: script = schema[locator_method]['created_by'][0] if not "schema" in schema[locator_method] or not schema[locator_method]["schema"]: print("Could not find schema for {locator_method}".format(locator_method=locator_method)) continue # for repetitive variables, include only one instance for variable in schema[locator_method]['schema']: if variable.find('srf') != -1: variable = variable.replace(variable, 'srf0') if variable.find('PIPE') != -1: variable = variable.replace(variable, 'PIPE0') if variable.find('NODE') != -1: variable = variable.replace(variable, 'NODE0') if variable.find('B0') != -1: variable = variable.replace(variable, 'B001') # if the variable is one associated with an epw file: exclude for now if schema[locator_method]['file_type'] == 'epw': variable = 'EPW file variables' # if the variable is actually a sheet name due to tree data shape if schema[locator_method]['file_type'] in {'xlsx', 'xls'}: worksheet = variable for variable_in_sheet in schema[locator_method]['schema'][worksheet]: file_name = "{file_path}:{worksheet}".format(file_path=schema[locator_method]['file_path'], worksheet=worksheet) schema_variables.add((variable_in_sheet, locator_method, script, file_name)) # otherwise create the meta set else: file_name = schema[locator_method]['file_path'] schema_variables.add((variable, locator_method, script, file_name)) return schema_variables
0dfeae2a3d28a1e2457a6b3da7e84a3e201fe6e7
703,098