content
stringlengths
35
416k
sha1
stringlengths
40
40
id
int64
0
710k
import struct
import lzma


def decompress_lzma(data: bytes) -> bytes:
    """Decompress a raw LZMA1 stream prefixed with a 5-byte header.

    The header is one properties byte followed by a little-endian 32-bit
    dictionary size, as in the legacy ``.lzma`` container.

    :param data: compressed data
    :type data: bytes
    :raises _lzma.LZMAError: Compressed data ended before the
        end-of-stream marker was reached
    :return: uncompressed data
    :rtype: bytes
    """
    props_byte, dict_size = struct.unpack("<BI", data[:5])
    # The properties byte encodes (pb * 5 + lp) * 9 + lc.
    lc = props_byte % 9
    remainder = props_byte // 9
    lp = remainder % 5
    pb = remainder // 5
    filters = [
        {
            "id": lzma.FILTER_LZMA1,
            "dict_size": dict_size,
            "lc": lc,
            "lp": lp,
            "pb": pb,
        }
    ]
    decompressor = lzma.LZMADecompressor(format=lzma.FORMAT_RAW, filters=filters)
    return decompressor.decompress(data[5:])
247c3d59d45f3f140d4f2c36a7500ff8a51e45b0
706,567
def get_case_number(caselist):
    """Count the non-comment lines in the file ``caselist``.

    Lines whose stripped content starts with ``#`` are treated as comments
    and skipped; every other line (including blank lines, matching the
    original behaviour) is counted.

    :param caselist: path of the case-list file
    :return: number of counted lines
    """
    num = 0
    with open(caselist, 'r') as casefile:
        for line in casefile:
            # `x is False` on a bool result is unidiomatic; use `not x`.
            if not line.strip().startswith('#'):
                num += 1
    return num
b1366d8e4a0e2c08da5265502d2dd2d72bf95c19
706,568
def _parseList(s):
    """Validation function. Parse a comma-separated list of strings."""
    parts = s.split(",")
    return list(map(str.strip, parts))
5bf9ac50a44a18cc4798ed616532130890803bac
706,569
def splitDataSet(dataSet, index, value):
    """Split a data set, keeping rows whose ``index``-th feature equals ``value``.

    dataSet: the data set to split
    index: feature column used for the split
    value: feature value that kept rows must have

    Returns the matching rows with the ``index`` column removed.
    """
    subset = []
    for row in dataSet:
        if row[index] != value:
            continue
        reduced = list(row[:index])
        reduced.extend(row[index + 1:])
        subset.append(reduced)
    # Rows whose `index` column equalled `value`, with that column dropped.
    return subset
814a54fe13d832e69d8df32af52d882d4a15c4ba
706,570
def in_skill_product_response(handler_input):
    """Get the In-skill product response from monetization service."""
    # type: (HandlerInput) -> Union[InSkillProductsResponse, Error]
    request_locale = handler_input.request_envelope.request.locale
    monetization = handler_input.service_client_factory.get_monetization_service()
    return monetization.get_in_skill_products(request_locale)
9452ac1498ff0e6601df9fc419df0cfdd6b9171e
706,572
def stat_mtime(stat):
    """Returns the mtime field from the results returned by os.stat()."""
    # Index 8 is ST_MTIME in the stat result tuple.
    mtime_index = 8
    return stat[mtime_index]
1f7fec9a54a97bb63141d63db706b2885913dadb
706,573
def calc_very_restricted_wage_distribution(df):
    """Compute per-period mean and std of wages for agents under two choice restrictions.

    Only rows with ``Policy == 'veryrestricted'`` and ``Choice`` equal to
    ``'a'`` or ``'b'`` are included.

    :param df: DataFrame with columns Policy, Choice, Period, Wage
    :return: DataFrame indexed by Period with ``mean`` and ``std`` columns
    """
    # The original query dropped the parentheses, so `and` bound tighter
    # than `or` and every row with Choice == 'b' was included regardless
    # of its Policy.
    return (
        df.query("Policy == 'veryrestricted' and (Choice == 'a' or Choice == 'b')")
        .groupby(["Period"])["Wage"]
        .describe()[["mean", "std"]]
    )
3ca8a2f0061e456a3158b4ee8a128a5a7439af3f
706,574
def msort(liste, indice):
    """Sort a list of rows by the value at column ``indice``.

    Indices start from 0.

    :param liste: list of indexable rows
    :param indice: column to sort by
    :return: a new list sorted by ``row[indice]``
    """
    # The original decorated each row as [key, row] and sorted that, which
    # falls back to comparing the rows themselves on equal keys and raises
    # TypeError for rows that do not support comparison. Sorting with a key
    # function is stable and only ever compares the keys.
    return sorted(liste, key=lambda row: row[indice])
7f4caff9a74f4d118877e335513e68ecd54986d8
706,576
def _Net_forward(self, blobs=None, start=None, end=None, **kwargs):
    """
    Forward pass: prepare inputs and run the net forward.

    Take
    blobs: list of blobs to return in addition to output blobs.
    kwargs: Keys are input blob names and values are blob ndarrays.
            For formatting inputs for Caffe, see Net.preprocess().
            If None, input is taken from data layers.
    start: optional name of layer at which to begin the forward pass
    end: optional name of layer at which to finish the forward pass
         (inclusive)

    Give
    outs: {blob name: blob ndarray} dict.
    """
    if blobs is None:
        blobs = []

    # Translate layer names into the indices the C++ _forward call expects.
    if start is not None:
        start_ind = list(self._layer_names).index(start)
    else:
        start_ind = 0

    if end is not None:
        end_ind = list(self._layer_names).index(end)
        # When stopping early, also return the end layer's blob.
        outputs = set([end] + blobs)
    else:
        end_ind = len(self.layers) - 1
        outputs = set(self.outputs + blobs)

    if kwargs:
        # All declared net inputs must be supplied together.
        if set(kwargs.keys()) != set(self.inputs):
            raise Exception('Input blob arguments do not match net inputs.')
        # Set input according to defined shapes and make arrays single and
        # C-contiguous as Caffe expects.
        for in_, blob in kwargs.items():
            if blob.ndim != 4:
                raise Exception('{} blob is not 4-d'.format(in_))
            if blob.shape[0] != self.blobs[in_].num:
                raise Exception('Input is not batch sized')
            self.blobs[in_].data[...] = blob

    self._forward(start_ind, end_ind)

    # Unpack blobs to extract
    return {out: self.blobs[out].data for out in outputs}
790baa0fc8529e3cad45bd8236060bad591ab4a4
706,577
import os


def MakeCommandName(name):
    """adds '.exe' if on Windows"""
    return name + '.exe' if os.name == 'nt' else name
39aba823ee6de6fb6d550dd7bfbdaf310010278b
706,579
import os


def is_path_parent(possible_parent, *paths):
    """
    Return True if a path is the parent of another, False otherwise.

    Multiple paths to test can be specified in which case all specified
    test paths must be under the parent in order to return True.
    """
    def _abs_with_sep(pth):
        # Normalise to an absolute path with a trailing separator so that
        # '/foo' is not treated as the parent of '/foobar'.
        absolute = os.path.abspath(pth)
        return absolute if absolute.endswith(os.sep) else absolute + os.sep

    if not paths:
        return False
    parent_prefix = _abs_with_sep(possible_parent)
    return all(_abs_with_sep(p).startswith(parent_prefix) for p in paths)
23ae7ab4cf2aaf4935613501a5344cf811313e7b
706,580
def cmpversion(a, b):
    """Compare versions the way chrome does.

    Returns True when version ``a`` is greater than ``b``: the major
    component is compared numerically, and ties recurse into the
    remainder of the version string.
    """
    # Equal strings used to recurse forever (e.g. cmpversion('1', '1')
    # kept comparing ('0', '0')); equal versions are simply "not greater".
    if a == b:
        return False

    def split_version(v):
        """Get major/minor of version."""
        if '.' in v:
            return v.split('.', 1)
        if '_' in v:
            return v.split('_', 1)
        return (v, '0')

    a_maj, a_min = split_version(a)
    b_maj, b_min = split_version(b)
    if a_maj == b_maj:
        return cmpversion(a_min, b_min)
    return int(a_maj) > int(b_maj)
226191f2a72d4cb65198ddcb779b130b7a524034
706,581
from datetime import datetime


def create_nav_btn(soup, date, text):
    """
    Helper function for month_calendar; generates a navigation button
    for the calendar.

    :param soup: BeautifulSoup parser of document
    :param date: Date to create nav button
    :param text: Text for button
    """
    header_cell = soup.new_tag('th', attrs=[('colspan', '2')])
    header_cell['class'] = 'month'
    link = soup.new_tag('a', href='/apps/orders/%s/%s' % (date.year, date.month))
    link.string = text
    if date > datetime.today():
        # Future months render as disabled buttons pointing nowhere.
        link['class'] = "btn btn-mini btn-info disabled"
        link['href'] = '#'
    else:
        link['class'] = "btn btn-mini btn-info"
    header_cell.insert(0, link)
    return header_cell
6f49e5173980a9da01e4d92e2f5adfeb73a4a4d0
706,582
import re


def parse_name(content):
    """ Finds the name of the man page. """
    # The leading run of word characters, dots and hyphens is the name.
    match = re.compile(r"^([\w\.-]*)").search(content)
    return match.group(1) if match is not None else ""
c3a1f32beb96d39d4490681bf90d54115597ffe5
706,584
def albanian_input_normal(field, text):
    """
    Prepare a string from one of the query fields for subsequent
    processing: replace common shortcuts with valid Albanian characters.
    """
    normalized_fields = ('wf', 'lex', 'lex2', 'trans_ru', 'trans_ru2')
    if field not in normalized_fields:
        return text
    # Cyrillic 'ё' and the ASCII 'e:' shortcut both stand for 'ë'.
    for shortcut, replacement in (('ё', 'ë'), ('e:', 'ë')):
        text = text.replace(shortcut, replacement)
    return text
6bd4e7a1e764feada04ae5e95465fb4d7cbb29fb
706,585
import re


def extract_share_id_from_url(public_base_url: str) -> str:
    """
    Extracts the Airtable share id from the provided URL.

    :param public_base_url: The URL where the share id must be extracted from.
    :raises ValueError: If the provided URL doesn't match the publicly shared
        Airtable URL.
    :return: The extracted share id.
    """
    # The dot in `airtable.com` must be escaped, otherwise URLs such as
    # `https://airtableXcom/shr...` would be accepted as well.
    result = re.search(r"https:\/\/airtable\.com\/shr(.*)$", public_base_url)
    if not result:
        raise ValueError(
            "Please provide a valid shared Airtable URL (e.g. "
            "https://airtable.com/shrxxxxxxxxxxxxxx)"
        )
    return f"shr{result.group(1)}"
5aad99b5bf022a2b957f10fcb09793188051340c
706,586
def adjust_learning_rate(optimizer, epoch, args):
    """Sets the learning rate to the initial LR decayed by 10 every 30 epochs"""
    # NOTE(review): the one-line summary above only describes one of the
    # policies; this actually dispatches on args.lr_policy:
    # 'decay', 'poly', 'fix', 'fix_step', 'custom_step', 'sgdr_step'.
    # Returns the computed lr, or None for an unknown policy.
    if args.lr_policy == 'decay':
        lr = args.lr * (args.lr_decay ** epoch)
    elif args.lr_policy == 'poly':
        # Number of custom-step boundaries already passed at this epoch.
        interval = len([x for x in args.lr_custom_step if epoch >= x])
        # Re-base epoch to the start of the current interval.
        epoch = epoch if interval == 0 else epoch - args.lr_custom_step[interval-1]
        if interval == 0:
            step = args.lr_custom_step[0]
        elif interval >= len(args.lr_custom_step):
            step = args.epochs - args.lr_custom_step[interval-1]
        else:
            step = args.lr_custom_step[interval] - args.lr_custom_step[interval-1]
        # Polynomial anneal from args.lr down to args.eta_min within the interval.
        lr = args.eta_min + (args.lr - args.eta_min) * (1 - epoch * 1.0 /step)** args.lr_decay
    elif args.lr_policy == 'fix':
        lr = args.lr
    elif args.lr_policy == 'fix_step':
        lr = args.lr * (args.lr_decay ** (epoch // args.lr_fix_step))
    elif args.lr_policy in ['custom_step', 'sgdr_step']:
        # Decay once per boundary passed.
        interval = len([x for x in args.lr_custom_step if epoch >= x])
        lr = args.lr *(args.lr_decay ** interval)
    else:
        # Unknown policy: leave the optimizer untouched.
        return None
    # 'sgdr_step' computes the value but does not write it into the optimizer
    # (presumably an external SGDR scheduler owns the lr there — confirm).
    if optimizer is not None and args.lr_policy != 'sgdr_step':
        for param_group in optimizer.param_groups:
            # Groups flagged with 'lr_constant' keep their fixed rate.
            if param_group.get('lr_constant', None) is not None:
                continue
            param_group['lr'] = lr
    return lr
c247c81a90476e737ff54c2a7ca45f5c42dccd38
706,587
def find_file_start(chunks, pos):
    """Find a chunk before the one specified which is not a file block."""
    # File blocks carry type 0x100 or 0x102; scan backwards past them.
    for candidate in range(pos - 1, 0, -1):
        if chunks[candidate][0] not in (0x100, 0x102):
            # This is not a block
            return candidate
    # Fell through: mirror the original's return value (pos - 1 clamped
    # so that any pos >= 1 yields 0, and pos <= 0 yields pos - 1).
    return 0 if pos > 0 else pos - 1
b0fb280a847dea3cd589d59863888d1087d4982f
706,588
def get_all_ann_index(self):
    """ Retrieves all annotation ids """
    # The keys of `ann_infos` are the annotation ids.
    return [ann_id for ann_id in self.ann_infos]
4375c9dbc14bf50575c8a5e42ce0ae8749820dfb
706,589
def get_pairs(l, k):
    """Count the pairs of numbers in ``l`` whose difference is ``k``.

    Each integer is first recorded in a set; then for every integer ``i``
    the presence of ``i + k`` is checked, keeping the whole procedure
    O(N). The approach may be generalized to non-unique positive integers.

    :param l: the list of unique positive input integers
    :type l: list
    :param k: the given difference
    :type k: int
    :return: the count of pairs whose difference is ``k``
    :rtype: int
    """
    seen = set(l)
    return sum(1 for value in l if value + k in seen)
90fd199c75431c1d20076cea04358b3ca5872810
706,590
def get_quoted_text(text):
    """Return the first quoted paragraph of ``text``.

    If the body/title text contains a quote (a paragraph starting with
    '>'), the first such paragraph is returned without its leading '>';
    otherwise the entire text is returned.

    :param text: The replyable text
    :return: The first quote, or the whole text when no quote exists
    """
    paragraphs = text.split('\n\n')
    quoted = next((p[1:] for p in paragraphs if p.startswith('>')), None)
    return text if quoted is None else quoted
3ac1801edcaf16af45d118918cb548f41d9a08fb
706,591
def get_username_for_os(os):
    """Return username for a given os."""
    if os == "alinux2":
        return "ec2-user"
    if os == "centos7":
        return "centos"
    if os in ("ubuntu1804", "ubuntu2004"):
        return "ubuntu"
    # Unknown OS names map to None, matching dict.get's default.
    return None
579ebfa4e76b6660d28afcc010419f32d74aa98c
706,592
from typing import List


def is_negative_spec(*specs: List[str]) -> bool:
    """
    Checks for negative values in a variable number of spec lists

    Each spec list can have multiple strings. Each string within each
    list will be searched for a '-' sign.

    :param specs: zero or more lists of spec strings (each may be None/empty)
    :return: True when any spec string contains a '-'
    """
    # The `if specset` filter keeps the original guard against None/empty
    # lists; the annotation was previously List[List[str]], which described
    # the whole *specs tuple rather than each argument.
    return any('-' in spec for specset in specs if specset for spec in specset)
216e6db2e63a657ac95a31896b9b61329a10a3db
706,593
def loadEvents(fname):
    """Read events from a file of "<unix timestamp> <text>" lines.

    Each line consists of a unix timestamp, a space, then an arbitrary
    string. Returns a list of ``{'t': stamp, 's': text}`` dicts.

    NOTE(review): the original docstring claimed global min/max times are
    tracked, but no globals are touched here — confirm with callers.

    :param fname: path of the events file
    :return: list of event dicts
    """
    # Use a context manager so the file handle is closed (the original
    # leaked it), and drop the duplicated `events = []` initialisation.
    with open(fname, 'r') as handle:
        lines = handle.read().splitlines()
    events = []
    for line in lines:
        # The stamp ends at the first space; the rest is the event text.
        stamp_str, _, text = line.partition(' ')
        events.append({'t': int(stamp_str), 's': text})
    return events
495dbd5d47892b953c139b27b1f20dd9854ea29a
706,594
from datetime import datetime
import json
import hashlib


def map_aircraft_to_record(aircrafts, message_now, device_id):
    """
    Maps the `aircraft` entity to a BigQuery record and its unique id.
    Returns `(unique_ids, records)`
    """
    def build_record(aircraft):
        # Key order matters: the sha512 of the JSON dump is the record id,
        # so fields are inserted exactly as in the BigQuery schema.
        record = {
            key: aircraft.get(key)
            for key in (
                'hex', 'squawk', 'flight', 'lat', 'lon', 'nucp', 'seen_pos',
                'altitude', 'vert_rate', 'track', 'speed', 'messages',
                'seen', 'rssi',
            )
        }
        record['device_id'] = device_id
        record['timestamp'] = datetime.utcfromtimestamp(float(message_now)).isoformat()
        # The id hashes the record *before* created_at is stamped, so
        # identical messages map to identical ids.
        digest = hashlib.sha512(json.dumps(record).encode('utf-8')).hexdigest()
        unique_id = f'{message_now}_{digest}'
        record['created_at'] = datetime.now().isoformat()
        return (unique_id, record)

    return zip(*map(build_record, aircrafts))
d423b87e2018486de076cc94a719038c53c54602
706,596
def sub(xs, ys):
    """
    Computes xs - ys, such that elements in xs that occur in ys are removed.

    @param xs: list
    @param ys: list
    @return: xs - ys
    """
    # Membership is tested against ys directly (no set conversion) so that
    # unhashable elements keep working.
    kept = []
    for candidate in xs:
        if candidate not in ys:
            kept.append(candidate)
    return kept
8911bb2c79919cae88463a95521cf051828038e8
706,597
def input_fn(request_body, request_content_type):
    """
    An input_fn that loads the pickled tensor by the inference server of
    SageMaker. Deserializes the inference request before predict_fn is
    invoked; intended to preprocess the source sentence into a tensor
    ready for the model.

    :param request_body: str The request body
    :param request_content_type: type The request body type.
    :return: torch.Tensor
    """
    json_type = 'application/json'
    if request_content_type != json_type:
        # Placeholder response for unsupported content types.
        return 'WHAT HAPPEN TO YOU !'
    return None
62d45e188d5537eaa566bd4b90bdb8abc7626621
706,598
def get_colors(k):
    """
    Return k colors in a list. We choose from 7 different colors.
    If k > 7 we choose colors more than once.
    """
    palette = ['b', 'r', 'g', 'c', 'm', 'y', 'k']
    # The original rotating-index loop is equivalent to cycling the palette.
    return [palette[i % len(palette)] for i in range(k)]
6c4a38eb394254f57d8be9fca47e0b44f51f5f04
706,599
import argparse


def _parse_args() -> argparse.Namespace:
    """Parses and returns the command line arguments."""
    arg_parser = argparse.ArgumentParser(description=__doc__)
    arg_parser.add_argument(
        'in_json',
        type=argparse.FileType('r'),
        help=('The JSON file containing a list of file names '
              'that the prefix map operations should be applied to'))
    arg_parser.add_argument(
        '--prefix-map-json',
        type=argparse.FileType('r'),
        required=True,
        help=('JSON file containing an array of prefix map transformations to apply '
              'to the strings before tokenizing. These string literal '
              'transformations are of the form "from=to". All strings with the '
              'prefix `from` will have the prefix replaced with `to`. '
              'Transformations are applied in the order they are listed in the JSON '
              'file.'))
    arg_parser.add_argument(
        '--output',
        type=argparse.FileType('w'),
        help='File path to write transformed paths to.')
    return arg_parser.parse_args()
108f2ab7d962fa31a99158997f832f46b8b8d6f8
706,600
def get_channels(posts):
    """
    <summary> Returns post channels (twitter/facebook)</summary>
    <param name="posts" type="list"> List of posts </param>
    <returns> List of strings "twitter" or "facebook" </returns>
    """
    # Posts short enough for a tweet (<= 140 chars) go to twitter.
    return [
        "twitter" if len(posts['post_text'][i]) <= 140 else "facebook"
        for i in range(len(posts['post_id']))
    ]
2bd67d13079ce115263ac46856d8a708f461cb7e
706,601
def ConvertToTypeEnum(type_enum, airflow_executor_type):
    """Converts airflow executor type string to enum.

    Args:
      type_enum: AirflowExecutorTypeValueValuesEnum, executor type enum value.
      airflow_executor_type: string, executor type string value.

    Returns:
      AirflowExecutorTypeValueValuesEnum: the executor type enum value.
    """
    # The enum class itself acts as the converting callable.
    converted = type_enum(airflow_executor_type)
    return converted
04162b04719031ba6b96d981a7ffe8a82691bc31
706,602
import os
import pkgutil


def get_data(cfg, working_dir, global_parameters, res_incl=None, res_excl=None):
    """Reads experimental measurements.

    Locates the sub-package next to this module whose name occurs in
    ``global_parameters['experiment_type']`` and delegates to its
    ``reading.read_data``.

    :param cfg: experiment configuration passed through to the reader
    :param working_dir: directory containing the measurement files
    :param global_parameters: dict; must contain 'experiment_type'
    :param res_incl: optional residues to include
    :param res_excl: optional residues to exclude
    :return: whatever the sub-package's ``read_data`` returns
    """
    exp_type = global_parameters['experiment_type']
    path = os.path.dirname(__file__)
    pkgs = [
        modname for _, modname, ispkg in pkgutil.iter_modules([path])
        if ispkg and modname in exp_type
    ]
    if pkgs:
        pkg = max(pkgs)
    else:
        exit("\nUnknown data type {:s}"
             "\nDid you forget _cpmg, _cest, etc?"
             "\n".format(global_parameters['experiment_type']))
    # `level=-1` (relative-then-absolute) only exists in Python 2; Python 3
    # requires level >= 0, so perform an absolute import here.
    reading = __import__(
        '.'.join([pkg, 'reading']), globals(), locals(), ['get_data'], 0
    )
    data = reading.read_data(cfg, working_dir, global_parameters, res_incl, res_excl)
    return data
826314fe99b6ba6dc408c7b0109536ae5fdc0acb
706,603
def index(web):
    """The web.request.params is a dictionary, pointing to falcon.Request directly."""
    greeting_target = web.request.params["name"]
    return f"Hello {greeting_target}!\n"
b717ac60d42b8161ed27f7e4156d8a5a03aea803
706,604
def _parseLocalVariables(line):
    """Accepts a single line in Emacs local variable declaration format and
    returns a dict of all the variables {name: value}.
    Raises ValueError if 'line' is in the wrong format.

    See http://www.gnu.org/software/emacs/manual/html_node/File-Variables.html
    """
    paren = '-*-'
    # Check find()'s result *before* offsetting it: the original added
    # len(paren) first, so a missing opening marker produced 2 instead of
    # -1 and the validation below could never trigger. A single marker
    # (start == end) is equally invalid.
    start = line.find(paren)
    end = line.rfind(paren)
    if start == -1 or end == -1 or start == end:
        raise ValueError("%r not a valid local variable declaration" % (line,))
    items = line[start + len(paren):end].split(';')
    localVars = {}
    for item in items:
        if len(item.strip()) == 0:
            continue
        split = item.split(':')
        if len(split) != 2:
            raise ValueError("%r contains invalid declaration %r"
                             % (line, item))
        localVars[split[0].strip()] = split[1].strip()
    return localVars
39dc5130f47589e111e4b894cf293d446ac0eac0
706,605
def __clean_datetime_value(datetime_string):
    """Normalise an ISO-ish datetime string by dropping 'T' and 'Z'.

    ``None`` passes through unchanged; any other non-string input raises
    TypeError.
    """
    if datetime_string is None:
        return datetime_string
    if not isinstance(datetime_string, str):
        raise TypeError("Expected datetime_string to be of type string (or None)")
    return datetime_string.replace("T", " ").replace("Z", "")
77afef31056365a47ea821de7a4979cb061920dc
706,606
import os


def create_folder(path):
    """
    Creates a folder if not already exists

    Args:
        :param path: The folder to be created
    Returns
        :return: True if folder was newly created, false if folder already exists
    """
    # EAFP: attempt the creation directly, which removes the race window
    # between the os.path.exists() check and os.makedirs() in the original.
    try:
        os.makedirs(path)
        return True
    except FileExistsError:
        return False
9b6cfaed256001aa15c15cb535fce54ddcf20bc8
706,607
def is_member(musicians, musician_name):
    """Return true if named musician is in musician list; otherwise return false.

    Parameters:
        musicians (list): list of musicians and their instruments
        musician_name (str): musician name

    Returns:
        bool: True if match is made; otherwise False.
    """
    wanted = musician_name.lower()
    for entry in musicians:
        # Each entry is "<name>, <instrument>"; compare names case-insensitively.
        if entry.split(', ')[0].lower() == wanted:
            return True
    return False
6ef5b9bbccb17d9b97a85e3af7789e059829184b
706,608
def _to_sequence(x):
    """shape batch of images for input into GPT2 model"""
    flattened = x.view(x.shape[0], -1)  # flatten images into sequences
    # to shape [seq len, batch]
    return flattened.transpose(0, 1).contiguous()
bb3b0bb478c924b520bf7bf991a028cf8aaea25f
706,609
def combine_per_choice(*args):
    """
    Combines two or more per-choice analytics results into one.

    Each argument maps keys to (weight, averages) pairs, where averages is
    a dict keyed by player-model name; shared keys are merged as weighted
    averages of the per-model values.
    """
    args = list(args)
    result = args.pop()
    new_weight = None
    new_averages = None
    while args:
        other = args.pop()
        for key in other:
            if key not in result:
                # Key only present in `other`: adopt it unchanged.
                result[key] = other[key]
            else:
                old_weight, old_averages = result[key]
                other_weight, other_averages = other[key]
                # NOTE(review): this compares against `new_averages` left over
                # from the *previously merged* key, so the first merged key is
                # never checked — presumably the intent was to compare
                # old_averages against other_averages; confirm before relying
                # on this validation.
                if (
                    new_averages
                    and set(old_averages.keys()) != set(new_averages.keys())
                ):
                    raise ValueError(
                        "Can't combine per-choice results which used different sets of "
                        "player models."
                    )
                new_weight = old_weight + other_weight
                new_averages = {}
                for pmn in old_averages:
                    # Weighted mean of the two per-model averages.
                    new_averages[pmn] = (
                        old_averages[pmn] * old_weight
                        + other_averages[pmn] * other_weight
                    ) / new_weight
                result[key] = (new_weight, new_averages)
    return result
63e482a60b521744c94d80b0b8a740ff74f4b197
706,610
import string


def prepare_input(dirty: str) -> str:
    """
    Prepare the plaintext by up-casing it and separating repeated letters
    with X's
    """
    letters = [ch.upper() for ch in dirty if ch in string.ascii_letters]
    dirty = "".join(letters)
    if len(dirty) < 2:
        return dirty
    pieces = []
    # Walk consecutive character pairs; a doubled letter gets an 'X'
    # inserted between its two occurrences.
    for current, following in zip(dirty, dirty[1:]):
        pieces.append(current)
        if current == following:
            pieces.append("X")
    pieces.append(dirty[-1])
    # Pad to an even length for digraph processing.
    if len(pieces) % 2 == 1:
        pieces.append("X")
    return "".join(pieces)
5c55ba770e024b459d483fd168978437b8d48c21
706,611
def get_signed_value(bit_vector):
    """
    This function will generate the signed value for a given bit list

    bit_vector : list of bits
    """
    # NOTE(review): despite the name, no sign handling happens here — the
    # result is a plain weighted sum. Key 0 contributes the raw value, and
    # each key i > 0 contributes value * 256 * 2**(i-1), i.e.
    # ((2 << 7) << (i - 1)). That weighting looks byte-oriented rather than
    # bit-oriented; confirm the intended semantics with callers.
    signed_value = 0
    # NOTE(review): the docstring says "list", but .keys() is called, so
    # bit_vector is actually a dict-like mapping position -> value.
    for i in sorted(bit_vector.keys()):
        if i == 0:
            signed_value = int(bit_vector[i])
        else:
            signed_value += ((2 << 7) << (int(i) - 1)) * int(bit_vector[i])
    return signed_value
6b2b9a968576256738f396eeefba844561e2d2c7
706,613
from typing import List
import sys


def _clean_sys_argv(pipeline: str) -> List[str]:
    """Values in sys.argv that are not valid option values in Where """
    reserved_opts = {
        pipeline, "label", "id", "only_for_rundate", "session", "stage",
        "station", "writers",
    }
    cleaned = []
    for opt in sys.argv[1:]:
        # Only long options survive, and only when the option name (the
        # part before any '=') is not reserved.
        if not opt.startswith("--"):
            continue
        if opt[2:].split("=")[0] in reserved_opts:
            continue
        cleaned.append(opt)
    return cleaned
551f4fdca5d7cc276b03943f3679cc2eff8ce89e
706,614
def get_number_from_user_input(prompt: str, min_value: int, max_value: int) -> int:
    """Prompt repeatedly until the user enters an int within [min_value, max_value]."""
    range_suffix = f" ({min_value}-{max_value})? "
    while True:
        raw = input(prompt + range_suffix)
        try:
            value = int(raw)
        except ValueError:
            print("Invalid input, please try again")
            continue
        if min_value <= value <= max_value:
            return value
        print("Invalid input, please try again")
c9df4ac604b3bf8f0f9c2a35added1f23e88048e
706,615
def process_domain_assoc(url, domain_map):
    """
    Replace domain name with a more fitting tag for that domain. User defined.
    Mapping comes from provided config file

    Mapping in yml file is as follows:
    tag:
      - url to map to tag
      - ...

    A small example domain_assoc.yml is included
    """
    if domain_map:
        for tag, urls in domain_map.items():
            if url in urls:
                return tag
    # No mapping configured or no match: keep the original url.
    return url
29c0f81a4959d97cd91f839cbe511eb46872b5ec
706,616
import random


def shuffled(iterable):
    """Randomly shuffle a copy of iterable."""
    copy = list(iterable)
    random.shuffle(copy)
    return copy
cd554d4a31e042dc1d2b4c7b246528a5184d558e
706,617
def parse_discontinuous_phrase(phrase: str) -> str:
    """
    Transform discontinuous phrase into a regular expression.

    Discontinuity is interpreted as taking place at any whitespace outside
    of terms grouped by parentheses: the whitespace means anything may
    occur between the left side and the right side.

    Example: x1 (x2 (x3"x4")) becomes x1.+(x2 (x3|x4))
    """
    depth = 0
    pieces = []
    for char in phrase:
        if char == "(":
            depth += 1
        elif char == ")":
            depth -= 1
        elif char == " " and depth == 0:
            # Top-level whitespace marks the discontinuity.
            char = ".+"
        pieces.append(char)
    return "".join(pieces)
58fe394a08931e7e79afc00b9bb0e8e9981f3c81
706,618
def name_standard(name):
    """
    return the Standard version of the input word

    :param name: the name that should be standard
    :return name: the standard form of word
    """
    # str.capitalize() performs the same upper-first/lower-rest transform
    # but also handles the empty string, which used to raise IndexError
    # on name[0].
    return name.capitalize()
65273cafaaa9aceb803877c2071dc043a0d598eb
706,619
def getChildElementsListWithTagAttribValueMatch(parent, tag, attrib, value):
    """
    Find all sub-elements (children) of ``parent`` that have the given tag
    and carry attribute ``attrib`` with the given value.

    Arguments:
    parent = parent element
    tag = tag value of the sub-element(child) to be searched for.
    attrib = attribute name the sub-element with the given tag should have.
    value = attribute value that sub-element should have.

    Returns a list of matching child elements.
    """
    xpath = ".//%s[@%s='%s']" % (tag, attrib, value)
    return parent.findall(xpath)
cae87e6548190ad0a675019b397eeb88289533ee
706,620
def not_numbers():
    """Non-numbers for (i)count."""
    samples = [None, [1, 2], {-3, 4}, (6, 9.7)]
    return samples
31f935916c8463f6192d0b2770c1034ee70a4fc5
706,621
def default_validate(social_account):
    """Default function for ONESOCIAL_VALIDATE_FUNC. Does nothing."""
    return None
634382dbfe64eeed38225f8dca7e16105c40f7c2
706,622
def recursive_dict_of_lists(d, helper=None, prev_key=None):
    """
    Builds dictionary of lists by recursively traversing a JSON-like structure.

    Arguments:
        d (dict): JSON-like dictionary.
        prev_key (str): Prefix used to create dictionary keys like: prefix_key.
            Passed by recursive step, not intended to be used.
        helper (dict): In case d contains nested dictionaries, you can specify
            a helper dictionary with 'key' and 'value' keys to specify where
            to look for keys and values instead of recursive step. It helps
            with cases like: {'action': {'type': 'step', 'amount': 1}}, by
            passing {'key': 'type', 'value': 'amount'} as a helper you'd get
            {'action_step': [1]} as a result.
    """
    d_o_l = {}
    # Helper short-circuit: when this level contains both helper fields,
    # record only the helper value and stop descending.
    if helper is not None and helper['key'] in d.keys() and helper['value'] in d.keys():
        if prev_key is not None:
            key = f"{prev_key}_{helper['key']}"
        else:
            key = helper['key']
        if key not in d_o_l.keys():
            d_o_l[key] = []
        d_o_l[key].append(d[helper['value']])
        return d_o_l
    for k, v in d.items():
        if isinstance(v, dict):
            # NOTE(review): sibling results are merged with dict.update, so a
            # composite key produced by two different branches overwrites
            # rather than extends — confirm that is intended.
            d_o_l.update(recursive_dict_of_lists(v, helper=helper, prev_key=k))
        else:
            if prev_key is not None:
                key = f'{prev_key}_{k}'
            else:
                key = k
            if key not in d_o_l.keys():
                d_o_l[key] = []
            # Lists are flattened into the accumulator; scalars are appended.
            if isinstance(v, list):
                d_o_l[key].extend(v)
            else:
                d_o_l[key].append(v)
    return d_o_l
c615582febbd043adae6788585d004aabf1ac7e3
706,623
def same_shape(shape1, shape2):
    """
    Checks if two shapes are the same

    Parameters
    ----------
    shape1 : tuple
        First shape
    shape2 : tuple
        Second shape

    Returns
    -------
    flag : bool
        True if both shapes are the same (same length and dimensions)
    """
    # Element-wise comparison keeps mixed tuple/list inputs working, which
    # a plain `shape1 == shape2` would not.
    return len(shape1) == len(shape2) and all(
        a == b for a, b in zip(shape1, shape2)
    )
9452f7973e510532cee587f2bf49a146fb8cc46e
706,624
from typing import List
from typing import Any


def reorder(list_1: List[Any]) -> List[Any]:
    """This function takes a list and returns it in sorted order.

    The input list is not modified; a new sorted list is returned.
    """
    # The hand-rolled insertion sort is replaced by the built-in Timsort,
    # which is likewise stable but avoids the O(n^2) worst case.
    return sorted(list_1)
2e7dad8fa138b1a9a140deab4223eea4a09cdf91
706,625
def is_extended_markdown(view):
    """True if the view contains 'Markdown Extended' syntax'ed text."""
    syntax_path = view.settings().get("syntax")
    return syntax_path.endswith("Markdown Extended.sublime-syntax")
5c870fd277910f6fa48f2b8ae0dfd304fdbddff0
706,626
def start_end_epoch(graph):
    """
    Start epoch of graph.

    :return: (start epoch, end epoch).
    """
    start, end = 0, 0
    for u, v in graph.edges_iter():
        for _, attrs in graph[u][v].items():
            end = max(end, attrs['etime_epoch_secs'])
            stime = attrs['stime_epoch_secs']
            # The first edge initialises start; afterwards keep the minimum.
            start = stime if start == 0 else min(start, stime)
    return (start, end)
724726ec83d3a98539eed859ec584c6f1adb8567
706,627
def is_point_in_rect(point, rect):
    """Check whether a coordinate point lies inside a rectangle.

    The rectangle is defined by its bounding box.

    :type point: list
    :param point: testing coordinate point (x, y)
    :type rect: list
    :param rect: bounding box (x0, y0, x1, y1)
    :rtype: boolean
    :return: boolean check result
    """
    x0, y0, x1, y1 = rect
    x, y = point
    # Chained comparisons express inclusive containment directly.
    return x0 <= x <= x1 and y0 <= y <= y1
d0c7a64138899f4e50b42dc75ea6030616d4dfec
706,628
from os.path import expanduser


def parse_pgpass(hostname='scidb2.nersc.gov', username='desidev_admin'):
    """Read a ``~/.pgpass`` file.

    Parameters
    ----------
    hostname : :class:`str`, optional
        Database hostname.
    username : :class:`str`, optional
        Database username.

    Returns
    -------
    :class:`str`
        A string suitable for creating a SQLAlchemy database engine, or
        None if no matching data was found.
    """
    fmt = "postgresql://{3}:{4}@{0}:{1}/{2}"
    try:
        with open(expanduser('~/.pgpass')) as p:
            lines = p.readlines()
    except FileNotFoundError:
        return None
    data = dict()
    for entry in lines:
        # Fields are host:port:database:user:password; later lines for the
        # same host/user overwrite earlier ones (original behaviour).
        fields = entry.strip().split(':')
        host_map = data.setdefault(fields[0], {})
        host_map[fields[3]] = fmt.format(*fields)
    try:
        return data[hostname][username]
    except KeyError:
        return None
929b705fa8a753f773321e47c73d096ffb4bd171
706,629
def convert_timestamp(ts):
    """Converts the timestamp to a format suitable for Billing.

    Examples of a good timestamp for startTime, endTime, and eventTime:
        '2016-05-20T00:00:00Z'

    Note the trailing 'Z'. Python does not add the 'Z' so we tack it on
    ourselves.
    """
    return '{}Z'.format(ts.isoformat())
6b8d19671cbeab69c398508fa942e36689802cdd
706,630
import re


def address_split(address, env=None):
    """The address_split() function splits an address into its four components.
    Address strings are on the form detector-detectorID|device-deviceID,
    where the detectors must be in dir(xtc.DetInfo.Detector) and device must
    be in (xtc.DetInfo.Device).

    @param address Full data source address of the DAQ device
    @param env     Optional env to dereference an alias into an address
    @return        Four-tuple of detector name, detector ID, device, and device ID
    """
    # pyana style: "Det-0|Dev-1"
    m = re.match(
        r"^(?P<det>\S+)\-(?P<det_id>\d+)\|(?P<dev>\S+)\-(?P<dev_id>\d+)$", address)
    if m is not None:
        return (m.group('det'), m.group('det_id'), m.group('dev'), m.group('dev_id'))
    # psana style: "Det.0:Dev.1"
    m = re.match(
        r"^(?P<det>\S+)\.(?P<det_id>\d+)\:(?P<dev>\S+)\.(?P<dev_id>\d+)$", address)
    if m is not None:
        return (m.group('det'), m.group('det_id'), m.group('dev'), m.group('dev_id'))
    # psana DetInfo string: "DetInfo(Det.0:Dev.1)"
    m = re.match(
        r"^DetInfo\((?P<det>\S+)\.(?P<det_id>\d+)\:(?P<dev>\S+)\.(?P<dev_id>\d+)\)$", address)
    if m is not None:
        return (m.group('det'), m.group('det_id'), m.group('dev'), m.group('dev_id'))
    if env is not None:
        # Try to see if this is a detector alias, and if so, dereference it.
        # Code from psana's Detector/PyDetector.py
        amap = env.aliasMap()
        alias_src = amap.src(address)  # string --> DAQ-style psana.Src
        # if it is an alias, look up the full name
        if amap.alias(alias_src) != '':  # alias found
            address = str(alias_src)
            # NOTE(review): env is not forwarded on this recursive call —
            # presumably the dereferenced address is already in literal form
            # and needs no second lookup; confirm.
            return address_split(address)
    # No pattern matched and no alias resolved.
    return (None, None, None, None)
c5d362c7fc6121d64ec6a660bcdb7a9b4b532553
706,632
def can_create_election(user_id, user_info):
    """Authorization hook: for now every user may create an election."""
    return True
06c8290b41b38a840b7826173fd65130d38260a7
706,633
def boolean(input):
    """Convert the given input to a boolean value.

    Values without a ``.strip`` method (i.e. non-strings) are passed to the
    bool builtin; string matching is case-insensitive.

    Acceptable values:

    True
        * yes * y * on * true * t * 1

    False
        * no * n * off * false * f * 0

    :param input: the value to convert to a boolean
    :type input: any
    :returns: converted boolean value
    :rtype: bool
    :raises ValueError: for strings matching neither list
    """
    try:
        normalized = input.strip().lower()
    except AttributeError:
        # Non-string inputs fall back to ordinary truthiness.
        return bool(input)
    truthy = ('yes', 'y', 'on', 'true', 't', '1')
    falsy = ('no', 'n', 'off', 'false', 'f', '0')
    if normalized in truthy:
        return True
    if normalized in falsy:
        return False
    raise ValueError("Unable to convert {0!r} to a boolean value.".format(input))
09c09206d5487bf02e3271403e2ba67358e1d148
706,634
def create_provisioned_product_name(account_name: str) -> str:
    """
    Replaces all space characters in an Account Name with hyphens, also
    removes all trailing and leading whitespace
    """
    trimmed = account_name.strip()
    # Splitting on single spaces and rejoining with '-' preserves runs of
    # spaces as runs of hyphens, exactly like str.replace(" ", "-").
    return "-".join(trimmed.split(" "))
743e7438f421d5d42c071d27d1b0fa2a816a9b4d
706,635
def release_branch_name(config):
    """
    build expected release branch name from current config
    """
    prefix = config.gitflow_release_prefix()
    version = config.package_version()
    return "{0}{1}".format(prefix, version)
0d97c515aca8412882c8b260405a63d20b4b0f63
706,636
def torch2numpy(data):
    """
    Transfer data from the torch tensor (on CPU) to the numpy array (on CPU).
    """
    converted = data.numpy()
    return converted
c7ca4123743c4f054d809f0e307a4de079b0af10
706,637
def edges_to_adj_list(edges):
    """Build an adjacency list (as a dict of sets) from a set of edges.

    The graph is treated as UNDIRECTED: for every edge (u, v) both
    ``v in adj[u]`` and ``u in adj[v]`` hold.

    INPUT:
        - edges : a set or list of 2-tuples of vertices
    OUTPUT:
        - dict mapping each vertex to the set of its neighbours
    """
    adjacency = {}
    for u, v in edges:
        # setdefault creates the neighbour set on first sight of a vertex
        adjacency.setdefault(u, set()).add(v)
        adjacency.setdefault(v, set()).add(u)
    return adjacency
683f10e9a0a9b8a29d63b276b2e550ebe8287a05
706,638
def default_mutable_arguments():
    """Explore default mutable arguments, which are a dangerous game in
    themselves.

    Why do mutable default arguments suffer from this apparent problem? A
    function's default values are evaluated at the point of function
    definition in the defining scope. In particular, we can examine these
    bindings by printing append_twice.__defaults__ after append_twice has been
    defined. For this function, we have

        print(append_twice.__defaults__)  # ([],)

    If a binding for `lst` is not supplied, then the `lst` name inside
    append_twice falls back to the array object that lives inside
    append_twice.__defaults__. In particular, if we update `lst` in place
    during one function call, we have changed the value of the default
    argument. That is,

        print(append_twice.__defaults__)  # ([], )
        append_twice(1)
        print(append_twice.__defaults__)  # ([1, 1], )
        append_twice(2)
        print(append_twice.__defaults__)  # ([1, 1, 2, 2], )

    In each case where a user-supplied binding for `lst is not given, we
    modify the single (mutable) default value, which leads to this crazy
    behavior.
    """
    # NOTE: the mutable default below is INTENTIONAL -- this function is a
    # demonstration of the pitfall, not production code.
    def append_twice(a, lst=[]):
        """Append a value to a list twice."""
        lst.append(a)
        lst.append(a)
        return lst

    # Explicit `lst=` bindings behave as expected...
    print(append_twice(1, lst=[4]))          # => [4, 1, 1]
    print(append_twice(11, lst=[2, 3, 5, 7]))  # => [2, 3, 5, 7, 11, 11]

    # ...but omitting `lst` keeps mutating the one shared default list.
    print(append_twice(1))  # => [1, 1]
    print(append_twice(2))  # => [1, 1, 2, 2]
    print(append_twice(3))
a58a8c2807e29af68d501aa5ad4b33ad1aa80252
706,639
def get_user_messages(user, index=0, number=0):
    """
    Return ``number`` of the user's messages starting at ``index``,
    ordered by time descending (presumably via the related manager's
    default ordering -- confirm against the model's Meta.ordering).

    Returns an empty tuple for missing/anonymous users, negative
    arguments, or any query failure.  When both ``index`` and ``number``
    are 0, ALL messages are returned (no slicing).
    """
    # Guard: anonymous/absent user or nonsensical slice bounds.
    if not user or user.is_anonymous or index < 0 or number < 0:
        return tuple()
    # noinspection PyBroadException
    try:
        if index == 0 and number == 0:
            all_message = user.messages.all()
        else:
            all_message = user.messages.all()[index:index+number]
    except Exception as e:
        # Best-effort: any ORM/query error degrades to "no messages".
        all_message = tuple()
    return all_message
bb0c499e5ca8ec650d2ebca12852d2345733e882
706,640
import argparse
from pathlib import Path


def parse_args():
    """Parse command-line arguments.

    Returns an ``argparse.Namespace`` with ``data_path`` (Path, required)
    and ``n_jobs`` (int, required).
    """
    parser = argparse.ArgumentParser(description="Map gleason data to standard format.")
    parser.add_argument("-d", "--data_path", type=Path, required=True,
                        help="Path to folder with the data.")
    parser.add_argument("-n", "--n_jobs", type=int, required=True,
                        help="Number of jobs to run in parallel.")
    return parser.parse_args()
0a1a1cb404d74c48d550642e31399410d1bd13c3
706,641
def read_simplest_expandable(expparams, config):
    """
    Read expandable parameters from config file of the type `param_1`.

    Parameters
    ----------
    expparams : dict, dict.keys, set, or alike
        The parameter names that should be considered as expandable.
    config : dict, dict.keys, set, or alike
        The user configuration file.

    Returns
    -------
    set of str
        The parameters in `config` that comply with `expparams`.
    """
    matching = set()
    for key in config:
        pieces = key.split("_")
        # Only names of the exact shape "<base>_<digits>" qualify.
        if len(pieces) != 2:
            continue
        base, suffix = pieces
        if suffix.isdigit() and base in expparams:
            matching.add(key)
    return matching
4e2068e4a6cbca050da6a33a24b5fb0d2477e4e3
706,642
def _hexsplit(string): """ Split a hex string into 8-bit/2-hex-character groupings separated by spaces""" return ' '.join([string[i:i+2] for i in range(0, len(string), 2)])
672e475edeaafaa08254845e620b0a771b294fa8
706,643
from typing import List


def hello_world(cities: List[str] = ["Berlin", "Paris"]) -> bool:
    """Print a hello-world greeting for each city.

    Arguments:
        - cities: List of cities in which 'hello world' is posted.

    Return:
        - success: Whether or not function completed successfully
          (False only on KeyboardInterrupt).
    """
    try:
        for city in cities:
            print("Hello {}!".format(city))
        return True
    except KeyboardInterrupt:
        return False
a24f0f47c9b44c97f46524d354fff0ed9a735fe3
706,644
import os


def makeFolder(path):
    """Create a directory (including missing parents).

    Args:
        path (str): Folder path.

    Returns:
        bool: True if the directory was created, False if it already
        existed or creation failed (failure is printed, not raised).
    """
    # Early exit: an existing directory is reported as "not created".
    if os.path.isdir(path):
        return False
    try:
        os.makedirs(path)
    except OSError as error:
        print("Directory %s can't be created (%s)" % (path, error))
        return False
    return True
4bd1535fb3ffc69f5638b6cfbeaf90a1ccbdf2f9
706,645
def team_to_repos(api, no_repos, organization):
    """Create a team_to_repos mapping for use in _add_repos_to_teams, and
    create each team and repo on the given organization.

    Creates 10 pull-permission teams ("team-0".."team-9") and 10 repos
    ("some-repo-0".."some-repo-9"), pairing them one-to-one.

    NOTE(review): ``api`` and ``no_repos`` are unused here -- presumably
    kept so the signature matches a test-fixture convention; confirm with
    callers before removing.

    Return the team_to_repos mapping (team name -> list with one repo name).
    """
    num_teams = 10
    # arrange
    team_names = ["team-{}".format(i) for i in range(num_teams)]
    repo_names = ["some-repo-{}".format(i) for i in range(num_teams)]
    for name in team_names:
        organization.create_team(name, permission="pull")
    for name in repo_names:
        organization.create_repo(name)
    team_to_repos = {
        team_name: [repo_name]
        for team_name, repo_name in zip(team_names, repo_names)
    }
    return team_to_repos
390da146c3f96c554f9194f8551a066eec535533
706,646
import struct


def padandsplit(message):
    """
    Pad *message* MD4/MD5-style and split it into 16-word blocks.

    Appends a 0x80 byte, then 0x00 bytes until the length is congruent to
    56 (mod 64), then the little-endian 64-bit bit-length of the original
    message.  Returns a two-dimensional list X[i][j] of 32-bit integers,
    one row of 16 words per 64-byte block.
    """
    origlen = len(message)
    # Modular arithmetic guarantees at least 1 byte of padding.
    padlength = 64 - ((origlen - 56) % 64)
    padded = (message
              + b"\x80"
              + b"\x00" * (padlength - 1)
              + struct.pack("<Q", origlen * 8))
    assert len(padded) % 64 == 0
    blocks = []
    for offset in range(0, len(padded), 64):
        words = struct.unpack("<16L", padded[offset:offset + 64])
        blocks.append(list(words))
    return blocks
ea06a3fc91e19ed0dbea6ddcc2ee6d554fb5a40f
706,647
def extract_coords(filename):
    """Extract J2000 coordinates from filename or filepath.

    Parameters
    ----------
    filename : str
        name or path of file

    Returns
    -------
    str
        J2000 coordinates in "HH MM SS.SSS +/-DD MM SS.SS" format
    """
    # Work on the basename if a path was given.
    name = filename.rsplit("/", 1)[-1]

    # Sign of the declination, used both to split and in the output.
    sign = "+" if "+" in name else "-"

    # "J<ra><sign><dec>_..." -> split off suffix, drop the J prefix.
    core = name.split("_")[0].strip("J")
    parts = core.split(sign)
    ra_raw, dec_raw = parts[0], parts[1]

    ra_fields = [ra_raw[0:2], ra_raw[2:4], ra_raw[4:]]
    dec_fields = [dec_raw[0:2], dec_raw[2:4], dec_raw[4:]]

    return " ".join(ra_fields) + " " + sign + " ".join(dec_fields)
57f0ca79223116caa770a1dbea2eda84df146855
706,648
def _parse_multi_header(headers): """ Parse out and return the data necessary for generating ZipkinAttrs. Returns a dict with the following keys: 'trace_id': str or None 'span_id': str or None 'parent_span_id': str or None 'sampled_str': '0', '1', 'd', or None (defer) """ parsed = { "trace_id": headers.get("X-B3-TraceId", None), "span_id": headers.get("X-B3-SpanId", None), "parent_span_id": headers.get("X-B3-ParentSpanId", None), "sampled_str": headers.get("X-B3-Sampled", None), } # Normalize X-B3-Flags and X-B3-Sampled to None, '0', '1', or 'd' if headers.get("X-B3-Flags") == "1": parsed["sampled_str"] = "d" if parsed["sampled_str"] == "true": parsed["sampled_str"] = "1" elif parsed["sampled_str"] == "false": parsed["sampled_str"] = "0" if parsed["sampled_str"] not in (None, "1", "0", "d"): raise ValueError("Got invalid X-B3-Sampled: %s" % parsed["sampled_str"]) for k in ("trace_id", "span_id", "parent_span_id"): if parsed[k] == "": raise ValueError("Got empty-string %r" % k) if parsed["trace_id"] and not parsed["span_id"]: raise ValueError("Got X-B3-TraceId but not X-B3-SpanId") elif parsed["span_id"] and not parsed["trace_id"]: raise ValueError("Got X-B3-SpanId but not X-B3-TraceId") # Handle the common case of no headers at all if not parsed["trace_id"] and not parsed["sampled_str"]: raise ValueError() # won't trigger a log message return parsed
2ac3d0cbee196385e970bcc85827c1a467b5bb3b
706,649
def brand_profitsharing_order_query(self, transaction_id, out_order_no, sub_mchid):
    """Query the profit-sharing result of a brand (chain) order.

    :param transaction_id: WeChat Pay order number,
        e.g. '4208450740201411110007820472'
    :param out_order_no: merchant profit-sharing order number
        (digits, letters and _-|*@ only), e.g. 'P20150806125346'
    :param sub_mchid: sub-merchant id generated and issued by WeChat Pay,
        e.g. '1900000109'
    :raises Exception: if any of the required parameters is missing
    """
    # Validate inputs up front (same messages/order as before).
    if not sub_mchid:
        raise Exception('sub_mchid is not assigned.')
    if not (transaction_id and out_order_no):
        raise Exception('transaction_id or out_order_no is not assigned.')
    # BUG FIX: the second `path =` previously discarded the base URL and
    # formatted transaction_id into the first placeholder, producing
    # '<transaction_id>&transaction_id=...' instead of a valid path.
    path = ('/v3/brand/profitsharing/orders?sub_mchid=%s'
            '&transaction_id=%s&out_order_no=%s'
            % (sub_mchid, transaction_id, out_order_no))
    return self._core.request(path)
cb1af072f2b4f94f632817baff6cdfea66110873
706,650
def get_controller_from_module(module, cname):
    """Look up a controller class inside *module*.

    The class name is taken from ``module.__controller__`` when present,
    otherwise derived as ``<Cname>Controller`` (first letter upper-cased,
    rest lower-cased).  Returns None when no such attribute exists.
    """
    if hasattr(module, '__controller__'):
        classname = module.__controller__
    else:
        classname = "{}{}Controller".format(cname[0].upper(), cname[1:].lower())
    return module.__dict__.get(classname, None)
b450105f6ec38a03fe461c5d9c07c4652da0efd3
706,651
def get_ogheader(blob, url=None):
    """Extract Open Graph markup into a dict.

    The OG header section is delimited by a line of only ``---``; lines of
    the form ``image: <value>`` before the delimiter populate ``'image'``.
    If no delimiter exists, any matches are ignored as false positives and
    an empty dict is returned.  Otherwise ``'url'`` (when given) and
    ``'title'`` (last ``# ``-prefixed line anywhere in the blob) are added.
    """
    found = False
    ogheader = dict()
    for line in blob.split('\n'):
        if line == '---':
            found = True
            break
        if line.startswith('image: '):
            toks = line.split()
            assert len(toks) == 2
            ogheader['image'] = toks[1]
    if not found:
        # BUG FIX: previously `return ogheader` ran unconditionally here,
        # making the url/title handling below dead code.
        return dict()  # Ignore any matches as false positives
    if url is not None:
        assert 'url' not in ogheader
        ogheader['url'] = url
    for line in blob.split('\n'):
        if line.startswith('# '):
            ogheader['title'] = line[2:]
    return ogheader
4edd7c5545ddef241ee2bfd5e316e47a336aaa3f
706,652
import time
import os


def get_log_filename(log_directory, device_name, name_prefix=""):
    """Returns the full path of log filename using the information provided.

    Args:
        log_directory (path): to where the log file should be created.
        device_name (str): to use in the log filename
        name_prefix (str): string to prepend to the start of the log file.

    Returns:
        str: Path to log filename using the information provided.
    """
    timestamp = time.strftime("%Y%m%d-%H%M%S")
    # Hyphen-join the non-empty name parts, then append the extension.
    parts = [name_prefix, device_name, timestamp] if name_prefix else [device_name, timestamp]
    log_file_name = "-".join(parts) + ".txt"
    return os.path.join(log_directory, log_file_name)
48c0540c9717e54ab4389a2c9f6a5e31696c4595
706,653
import re


def condense_colors(svg):
    """Condense colors by using hexadecimal abbreviations where possible.

    Any lowercase ``#rrggbb`` value whose three channels are doubled hex
    digits (e.g. ``#ffcc00``) is shortened to ``#rgb``; ``#808080`` is
    rewritten as ``grey``.  This generalizes the previous hard-coded
    substitution table (as its own docstring suggested) while preserving
    every mapping it performed.
    """
    # One backreference-based pass replaces the former 16 literal rules.
    svg = re.sub(r'#([0-9a-f])\1([0-9a-f])\2([0-9a-f])\3', r'#\1\2\3', svg)
    svg = re.sub('#808080', 'grey', svg)
    return svg
413f1d7c69a52384fc21ee6f8eda6f2a63833e66
706,654
def rgb(r=0, g=0, b=0, mode='RGB'):
    """Convert **r**, **g**, **b** values to an SVG color `string`.

    :param r: red part
    :param g: green part
    :param b: blue part
    :param string mode: ``'RGB'`` -> ``'rgb(r,g,b)'`` with each channel
        masked to 8 bits; ``'%'`` -> ``'rgb(r%,g%,b%)'`` with each channel
        clamped to the integer range 0..100 (see
        http://www.w3.org/TR/SVG11/types.html#DataTypeColor).
    :rtype: string
    :raises ValueError: for any other mode
    """
    def clamp_percent(value):
        # percentage is an 'integer' value, clamped to [0, 100]
        return min(100, max(0, int(value)))

    if mode.upper() == 'RGB':
        return "rgb(%d,%d,%d)" % (int(r) & 255, int(g) & 255, int(b) & 255)
    if mode == "%":
        return "rgb(%d%%,%d%%,%d%%)" % (clamp_percent(r),
                                        clamp_percent(g),
                                        clamp_percent(b))
    raise ValueError("Invalid mode '%s'" % mode)
563b8fe8273ce4534567687df01cebe79b9f58dc
706,655
def load_csv_translations(fname, pfx=''):
    """
    Load translations from a tab-delimited file.  Add prefix to the keys.
    Lines shorter than 3 characters or without exactly one tab are
    skipped.  Return a dictionary.
    """
    translations = {}
    with open(fname, 'r', encoding='utf-8-sig') as fh:
        for raw in fh:
            raw = raw.strip('\r\n ')
            # Require "key<TAB>value" with non-trivial content.
            if len(raw) <= 2 or raw.count('\t') != 1:
                continue
            key, value = raw.split('\t')
            translations[pfx + key] = value
    return translations
e8b4707fe5eeb0f0f4f4859bd9a5f2272387a022
706,656
def refines_constraints(storage, constraints):
    """
    Determines whether with the storage as basis for the substitution map
    there is a substitution that can be performed on the constraints,
    therefore refining them.

    :param storage: The storage basis for the substitution map
    :param constraints: The constraint list containing the expressions to be
        substituted.
    :return: True if the substitution would change the constraint list.
    """
    storage_names = ["storage[" + str(key) + "]" for key in storage]
    return any(
        name in constraint.slot_names
        for name in storage_names
        for constraint in constraints
    )
de82087c41d95240ee9d15bd51810b7c5594ef0f
706,657
def normalize(data, train_split):
    """
    Standardize data using statistics of the training prefix only
    (avoids leaking test-set statistics).

    :param data: data set
    :param train_split: number of training samples
    :return: normalized data, mean, std
    """
    train = data[:train_split]
    mean = train.mean(axis=0)
    std = train.std(axis=0)
    standardized = (data - mean) / std
    return standardized, mean, std
cfc45ac5bd6ae7a30169253a1ae3ed64c1bd1118
706,659
def get_type_dict(kb_path, dstc2=False):
    """Build a mapping from KB entity types to their observed values.

    Specifically, we augment the vocabulary with some special words, one
    for each of the KB entity types.  For each type, the corresponding
    type word is added to the candidate representation if a word is found
    that appears as a KB entity of that type.

    :param kb_path: path to the knowledge-base file.  In dstc2 format each
        line is space-separated with the value in field 3; otherwise the
        value follows a tab after the space-separated prefix.
    :param dstc2: select the dstc2 line format.
    :return: dict mapping 'R_restaurant' and each entity type to the list
        of distinct values, in first-seen order.
    """
    type_dict = {'R_restaurant': []}
    # FIX: the file handle was previously opened and never closed;
    # a with-statement guarantees closure even if parsing raises.
    with open(kb_path, 'r') as fd:
        for line in fd:
            if dstc2:
                x = line.replace('\n', '').split(' ')
                rest_name = x[1]
                entity = x[2]
                entity_value = x[3]
            else:
                x = line.split('\t')[0].split(' ')
                rest_name = x[1]
                entity = x[2]
                entity_value = line.split('\t')[1].replace('\n', '')

            if rest_name not in type_dict['R_restaurant']:
                type_dict['R_restaurant'].append(rest_name)
            if entity not in type_dict.keys():
                type_dict[entity] = []
            if entity_value not in type_dict[entity]:
                type_dict[entity].append(entity_value)
    return type_dict
cd35054505c429cc1ad17eabe1cafb1aa6b38a1f
706,660
def merge_dicts(iphonecontrollers, ipadcontrollers):
    """Add ipad controllers to the iphone controllers dict, but never
    overwrite a custom controller with None!

    An ipad entry is taken only when the iphone dict is missing the key
    or maps it to None.
    """
    merged = dict(iphonecontrollers)
    for identifier, customclass in ipadcontrollers.items():
        if merged.get(identifier) is None:
            merged[identifier] = customclass
    return merged
10638e775d6578e2553ff5b2b47aff8a17051c7e
706,661
def perimRect(length, width):
    """
    Compute perimeter of rectangle
    >>> perimRect(2,3)
    10
    >>> perimRect(4, 2.5)
    13.0
    >>> perimRect(3, 3)
    12
    >>>
    """
    half_perimeter = length + width
    return half_perimeter * 2
50fdd92430352f443d313d0931bab50ad5617622
706,662
def add_vary_callback_if_cookie(*varies):
    """Build a response callback that adds the given names to ``Vary``.

    Prevent downstream web servers from accidentally caching session
    set-cookie responses, potentially resulting in session leakage.

    :param varies: header names to merge into ``response.vary``.
    :return: callback ``inner(request, response)`` that performs the merge.
    """
    def inner(request, response):
        # Merge with any Vary values already set on the response.
        vary = set(response.vary if response.vary is not None else [])
        vary |= set(varies)
        response.vary = vary
    # BUG FIX: the factory previously fell off the end and returned None,
    # so callers never received the callback.
    return inner
ee7949f8c6ba1c11784b2c460e3c9dd962473412
706,665
def simple_list(li):
    """
    Return a sorted copy of list *li* with duplicate values removed.
    """
    deduplicated = set(li)
    return sorted(deduplicated)
1e36f15cea4be4b403f0a9795a2924c08b2cb262
706,666
import json


def write_json(object_list, metadata, num_frames, out_file=None):
    """Serialize per-frame vehicle tracks to a JSON-compatible dict.

    :param object_list: tracked objects; each is expected to expose
        ``first_frame``, ``cls``, ``tags``, ``all``, ``all_world`` and
        ``all_gps`` (the latter three presumably numpy arrays, given the
        ``.tolist()`` calls -- TODO confirm).
    :param metadata: opaque metadata dict copied into the output.
    :param num_frames: number of frames to emit.
    :param out_file: optional path; when given, the result is also dumped
        as JSON to this file.
    :return: dict with ``metadata`` and per-frame ``data``.
    """
    # Index into this table with int(obj.cls); index 4 has no known label.
    classes = ["person","bicycle","car","motorbike","NA","bus","train","truck"]

    # metadata = {
    # "camera_id": camera_id,
    # "start_time":start_time,
    # "num_frames":num_frames,
    # "frame_rate":frame_rate
    # }

    data = {}
    for frame_num in range(0,num_frames):
        frame_data = []

        # for each object
        for i in range(0,len(object_list)):
            obj = object_list[i]
            # see if coordinate will be in range: the object's track covers
            # frames [first_frame, first_frame + len(obj.all)).
            if obj.first_frame <= frame_num:
                if obj.first_frame + len(obj.all) > frame_num:
                    veh_data = {}
                    # idx is the offset into the object's own track arrays.
                    idx = frame_num - obj.first_frame
                    veh_data["id_num"] = i
                    veh_data["class"] = classes[int(obj.cls)]
                    veh_data["detected"] = obj.tags[idx]
                    veh_data["image_position"] = (obj.all[idx]).tolist()
                    veh_data["world_position"] = (obj.all_world[idx]).tolist()
                    veh_data["gps_position"] = (obj.all_gps[idx]).tolist()

                    frame_data.append(veh_data)
        data[frame_num] = frame_data

    all_data = {
        "metadata":metadata,
        "data":data
        }

    if out_file is not None:
        with open(out_file, 'w') as fp:
            json.dump(all_data, fp)

    return all_data
8b224af4edbd31570a432a8c551e95cd7a002818
706,667
import copy def _clean_root(tool_xml): """XSD assumes macros have been expanded, so remove them.""" clean_tool_xml = copy.deepcopy(tool_xml) to_remove = [] for macros_el in clean_tool_xml.getroot().findall("macros"): to_remove.append(macros_el) for macros_el in to_remove: clean_tool_xml.getroot().remove(macros_el) return clean_tool_xml
9df0980265b26a2de1c88d2999f10cd5d1421e0b
706,668
import json
import zlib
import base64


def convert_gz_json_type(value):
    """Provide an ArgumentParser type function to unmarshal a b64 gz JSON
    string."""
    raw = base64.b64decode(value)
    decompressed = zlib.decompress(raw)
    return json.loads(decompressed)
1cf0300f40c8367b9129f230a7fef0c9b89ba012
706,669
import string
import random


def get_random_string(length: int) -> str:
    """
    Returns a random string starting with a lower-case letter.
    Later parts can contain numbers, lower- and uppercase letters.
    Note: Random Seed should be set somewhere in the program!

    :param length: How long the required string must be. length > 0 required.
    :return: a randomly created string
    :raises: ValueError for zero and negative length
    """
    if length < 1:
        raise ValueError("Random Strings must have length 1 minimum.")

    alphabet = string.ascii_letters + string.digits
    head = random.choice(string.ascii_lowercase)
    tail = ''.join(random.choice(alphabet) for _ in range(length - 1))
    return head + tail
6cf20ce7d158ac158ffa49cac427c396cfd840db
706,671
def factorial(n):
    """
    Return n! - the factorial of n.

    >>> factorial(1)
    1
    >>> factorial(0)
    1
    >>> factorial(3)
    6
    """
    if n < 0:
        # Preserved legacy behavior: negative input yields 0.
        return 0
    if n <= 1:
        # BUG FIX: factorial(0) previously returned 0, contradicting the
        # doctest above; 0! == 1 by definition.
        return 1
    return n * factorial(n - 1)
da5bc6f68375c7db03b7b2bdac1fec2b476ba563
706,672
def get_ffmpeg_folder():
    # type: () -> str
    """Return the (hard-coded) path to the folder containing the ffmpeg
    executable.
    """
    ffmpeg_bin_dir = 'C:/ffmpeg/bin'
    return ffmpeg_bin_dir
4708eec64ff56b72f7b1b9cc7f5ee7916f6310bd
706,673
def create_bag_of_vocabulary_words(vocabulary_path="./hangman/vocabulary.txt"):
    """
    Form the array of words which can be conceived during the game.

    :param vocabulary_path: path to a whitespace-separated word list.
        Defaults to the previously hard-coded location, so existing
        zero-argument callers are unaffected.
    :return: list of all words in the file, in order.
    """
    words = []
    # 'with' guarantees the handle is closed even if reading raises
    # (the previous open()/close() pair leaked on error).
    with open(vocabulary_path) as vocab_file:
        for line in vocab_file:
            words.extend(line.split())
    return words
e3aadad2575e28b19b83158eb2127437c8aada89
706,674
def clean_cells(nb_node):
    """Delete any outputs and resets cell count.

    Mutates *nb_node* in place (code cells only) and returns it.
    """
    for cell in nb_node['cells']:
        if cell['cell_type'] != 'code':
            continue
        if 'outputs' in cell:
            cell['outputs'] = []
        if 'execution_count' in cell:
            cell['execution_count'] = None
    return nb_node
67dce7ecc3590143730f943d3eb07ae7df9d8145
706,675
def test_f32(heavydb): """If UDF name ends with an underscore, expect strange behaviour. For instance, defining @heavydb('f32(f32)', 'f32(f64)') def f32_(x): return x+4.5 the query `select f32_(0.0E0))` fails but not when defining @heavydb('f32(f64)', 'f32(f32)') def f32_(x): return x+4.5 (notice the order of signatures in heavydb decorator argument). """ @heavydb('f32(f32)', 'f32(f64)') # noqa: F811 def f_32(x): return x+4.5 descr, result = heavydb.sql_execute( 'select f_32(0.0E0) from {heavydb.table_name} limit 1' .format(**locals())) assert list(result)[0] == (4.5,)
157560cc90e3f869d84198eeb26896a76157eb39
706,676
def deprecated_func_docstring(foo=None):
    """DEPRECATED. Deprecated function."""
    # Identity passthrough retained for backward compatibility.
    result = foo
    return result
f9c996c4f3735ed2767f0bbb139b1494e2a0fa39
706,677