content (string, lengths 22-815k)
id (int64, 0-4.91M)
def str_to_bool(v):
    """
    :type v: str
    """
    return v.lower() in ("true", "1")
3,600
def home_event_manager():
    """
    Route for alumni's home
    :return:
    """
    if "idUsers" in session and session["UserTypes_idUserTypes"] == 2:
        return redirect("/events")
    else:
        session.clear()
        return redirect("/login")
3,601
def define_url_service(settings_dict) -> str:
    """Define the url service for the client.
    It prioritizes ENV variable over settings module"""
    url = os.environ.get(defaults.SERVICE_URL_ENV)
    if url:
        return url
    else:
        return settings_dict.get("WORKFLOW_SERVICE", defaults.SERVICE_URL)
3,602
def make_analytics_slices(
    record: Mapping[str, Any], key_value_map: Mapping[str, Any], start_date: str, end_date: str = None
) -> Iterable[Mapping[str, Any]]:
    """
    We drive the ability to directly pass the prepared parameters inside the stream_slice.
    The output of this method is ready slices for analytics streams:
    """
    # define the base_slice
    base_slice = get_parent_stream_values(record, key_value_map)
    # add chunked fields, date_slices to the base_slice
    analytics_slices = []
    for fields_set in chunk_analytics_fields():
        base_slice["fields"] = ",".join(map(str, fields_set))
        for date_slice in make_date_slices(start_date, end_date):
            base_slice.update(**date_slice)
            analytics_slices.append(base_slice.copy())
    yield from analytics_slices
3,603
def test_check_file_relevance_and_format_path_ignored_non_pack_files(input_file_path):
    """
    Given
    - file path to validate
    When
    - file is not in Packs directory
    Then
    - return None, file is ignored
    """
    validator_obj = ValidateManager(is_external_repo=True, check_is_unskipped=False)
    assert validator_obj.check_file_relevance_and_format_path(input_file_path, None, set()) == ('', '', True)
3,604
def md5_encode(text):
    """ MD5-hash the given text """
    md5 = hashlib.md5()
    md5.update(text.encode('utf-8'))
    encodedStr = md5.hexdigest().upper()
    return encodedStr
3,605
def _ngrams(segment, n):
    """Extracts n-grams from an input segment.

    Parameters
    ----------
    segment: list
        Text segment from which n-grams will be extracted.
    n: int
        Order of n-gram.

    Returns
    -------
    ngram_counts: Counter
        Contain all the nth n-grams in segment with a count of how many times
        each n-gram occurred.
    """
    ngram_counts = Counter()
    for i in range(0, len(segment) - n + 1):
        ngram = tuple(segment[i:i + n])
        ngram_counts[ngram] += 1
    return ngram_counts
3,606
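A quick usage sketch for the _ngrams counter above; the token list is a made-up example, and Counter is assumed to come from collections exactly as in the function itself.

from collections import Counter  # assumed import, matching the function's use of Counter

tokens = ["the", "cat", "sat", "on", "the", "mat"]
bigrams = _ngrams(tokens, 2)
# Counter({('the', 'cat'): 1, ('cat', 'sat'): 1, ('sat', 'on'): 1,
#          ('on', 'the'): 1, ('the', 'mat'): 1})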
def process_file_scanexpr (container, filename, data): """ Process a single file :param container: str, path and filename of container if the file is within a zip archive, None otherwise. :param filename: str, path and filename of file on disk, or within the container. :param data: bytes, content of the file if it is in a container, None if it is a file on disk. """ #TODO: replace print by writing to a provided output file (sys.stdout by default) if container: display_filename = '%s in %s' % (filename, container) else: display_filename = filename print('='*79) print('FILE:', display_filename) all_code = '' try: #TODO: handle olefile errors, when an OLE file is malformed import oletools oletools.olevba.enable_logging() log.debug('opening {}'.format(filename)) vba = VBA_Parser(filename, data, relaxed=True) if vba.detect_vba_macros(): # Read in document metadata. ole = olefile.OleFileIO(filename) meta.metadata = ole.get_metadata() #print 'Contains VBA Macros:' for (subfilename, stream_path, vba_filename, vba_code) in vba.extract_macros(): # hide attribute lines: #TODO: option to disable attribute filtering vba_code = filter_vba(vba_code) print('-'*79) print('VBA MACRO %s ' % vba_filename) print('in file: %s - OLE stream: %s' % (subfilename, repr(stream_path))) print('- '*39) # detect empty macros: if vba_code.strip() == '': print('(empty macro)') else: # TODO: option to display code print(vba_code) vba_code = vba_collapse_long_lines(vba_code) all_code += '\n' + vba_code print('-'*79) print('EVALUATED VBA EXPRESSIONS:') t = prettytable.PrettyTable(('Obfuscated expression', 'Evaluated value')) t.align = 'l' t.max_width['Obfuscated expression'] = 36 t.max_width['Evaluated value'] = 36 for expression, expr_eval in scan_expressions(all_code): t.add_row((repr(expression), repr(expr_eval))) print(t) else: print('No VBA macros found.') except: #TypeError: #raise #TODO: print more info if debug mode #print sys.exc_value # display the exception with full stack trace for debugging, but do not stop: traceback.print_exc() print('')
3,607
def p_ProtocolDefn(p):
    """ProtocolDefn : OptionalSendSemanticsQual PROTOCOL ID '{' ProtocolBody '}' ';'"""
    protocol = p[5]
    protocol.loc = locFromTok(p, 2)
    protocol.name = p[3]
    protocol.sendSemantics = p[1]
    p[0] = protocol

    if Parser.current.type == 'header':
        _error(protocol.loc, 'can\'t define a protocol in a header. Do it in a protocol spec instead.')
3,608
def batch(ctx, batches, batches_files, tuning_search_dict, tuning_search_file, no_wait, list_contexts, list_output_dirs, list_inputs, runner, local_concurrency, lsf_threads, lsf_memory, lsf_queue, lsf_fast_queue, lsf_resources, lsf_priority, action_on_existing, action_on_pending, prefix_outputs_path, forwarded_args): """Run on all the inputs/tests/recordings in a given batch using the LSF cluster.""" if not batches_files: click.secho(f'WARNING: Could not find how to identify input tests.', fg='red', err=True, bold=True) click.secho(f'Consider adding to qaboard.yaml somelike like:\n```\ninputs:\n batches: batches.yaml\n```', fg='red', err=True) click.secho(f'Where batches.yaml is formatted like in http://qa-docs/docs/batches-running-on-multiple-inputs', fg='red', err=True) return if not batches: if not len(forwarded_args): click.secho(f'ERROR: you must provide a batch', fg='red', err=True, bold=True) click.secho(f'Use either `qa batch BATCH`, or `qa batch --batch BATCH_2 --batch BATCH_2`', fg='red', err=True) exit(1) single_batch, *forwarded_args = forwarded_args batches = [single_batch] print_url(ctx) existing_outputs = get_outputs(ctx.obj) command_id = str(uuid.uuid4()) # unique IDs for triggered runs makes it easier to wait/cancel them os.environ['QA_BATCH']= 'true' # triggered runs will be less verbose than with just `qa run` os.environ['QA_BATCHES_FILES'] = json.dumps([str(b) for b in batches_files]) dryrun = ctx.obj['dryrun'] or list_output_dirs or list_inputs or list_contexts should_notify_qa_database = (is_ci or ctx.obj['share']) and not (dryrun or ctx.obj['offline']) if should_notify_qa_database: command_data = { "command_created_at_datetime": datetime.datetime.utcnow().isoformat(), "argv": sys.argv, "runner": runner, **ctx.obj, } job_url = getenvs(('BUILD_URL', 'CI_JOB_URL', 'CIRCLE_BUILD_URL', 'TRAVIS_BUILD_WEB_URL')) # jenkins, gitlabCI, cirlceCI, travisCI if job_url: command_data['job_url'] = job_url if not os.environ.get('QA_BATCH_COMMAND_HIDE_LOGS'): notify_qa_database(object_type='batch', command={command_id: command_data}, **ctx.obj) tuning_search, filetype = load_tuning_search(tuning_search_dict, tuning_search_file) default_runner_options = { "type": runner, "command_id": command_id, } # Each runner should add what it cares about... 
# TODO: Having --runner-X prefixes makes it all a mess, but still the help text is useful # TODO: It would be nice to generate the CLI help depending on the runner that's choosen, then we could use if runner == 'lsf': default_runner_options.update({ "project": lsf_config.get('project', str(project) if project else "qaboard"), "max_threads": lsf_threads, "max_memory": lsf_memory, 'resources': lsf_resources, "queue": lsf_queue, "fast_queue": lsf_fast_queue, "user": ctx.obj['user'], }) if runner == "local": default_runner_options["concurrency"] = local_concurrency if runner == 'local' or runner == 'celery': default_runner_options["cwd"] = ctx.obj['previous_cwd'] if 'previous_cwd' in ctx.obj else os.getcwd() jobs = JobGroup(job_options=default_runner_options) inputs_iter = iter_inputs(batches, batches_files, ctx.obj['database'], ctx.obj['configurations'], ctx.obj['platform'], default_runner_options, config, ctx.obj['inputs_settings']) for run_context in inputs_iter: input_configuration_str = serialize_config(run_context.configurations) for tuning_file, tuning_hash, tuning_params in iter_parameters(tuning_search, filetype=filetype, extra_parameters=ctx.obj['extra_parameters']): if not prefix_outputs_path: batch_conf_dir = make_batch_conf_dir( outputs_commit, ctx.obj["batch_label"], run_context.platform, run_context.configurations, tuning_params, ctx.obj['share'] ) else: batch_conf_dir = outputs_commit / prefix_outputs_path if tuning_file: batch_conf_dir = batch_conf_dir / Path(tuning_file).stem from qaboard.conventions import slugify_hash input_dir = run_context.rel_input_path.with_suffix('') if len(input_dir.as_posix()) > 90: input_dir = Path(slugify_hash(input_dir.as_posix(), maxlength=90)) run_context.output_dir = batch_conf_dir / input_dir if forwarded_args: run_forwarded_args = [a for a in forwarded_args if not a in ("--keep-previous", "--no-postprocess", "--save-manifests-in-database")] if run_forwarded_args: run_context.extra_parameters = {"forwarded_args": run_forwarded_args, **tuning_params} else: run_context.extra_parameters = tuning_params else: run_context.extra_parameters = tuning_params if list_output_dirs: print(run_context.output_dir) break if list_inputs: print(run_context.input_path) break matching_existing_outputs = [o for o in existing_outputs.values() if url_to_dir(o['output_dir_url']) == run_context.output_dir] matching_existing_output = matching_existing_outputs[0] if matching_existing_outputs else None # at most 1, garanteed by database constaints is_pending = matching_existing_output['is_pending'] if matching_existing_output else False is_failed = matching_existing_output['is_failed'] if matching_existing_output else run_context.is_failed() ran_before = True if matching_existing_output else run_context.ran() should_run = not is_pending and (action_on_existing=='run' or is_failed or not ran_before) if not should_run and action_on_existing=='skip': continue if is_pending and action_on_pending == 'skip': continue if not forwarded_args: forwarded_args_cli = None else: if not on_windows: # FIXME: we assume no single quotes... 
forwarded_args_cli = ' '.join(f"'{a}'" for a in forwarded_args) else: from .compat import escaped_for_cli forwarded_args_cli = ' '.join(escaped_for_cli(a) for a in forwarded_args) if input_configuration_str == get_default_configuration(ctx.obj['inputs_settings']): configuration_cli = None else: # We can't use --config, or "-c A -c B" until we ensure all clients updated a version supporting it if not on_windows: configuration = input_configuration_str.replace("'", "'\"'\"'") # support single-quotes configuration_cli = f"--configuration '{configuration}'" else: from .compat import escaped_for_cli configuration_cli = f'--configuration {escaped_for_cli(input_configuration_str)}' # We could serialize properly the run_context/runner_options, and e.g. call "qa --pickled-cli" and use the CLI command below just for logs... args = [ f"qa", f'--share' if ctx.obj["share"] else None, f'--offline' if ctx.obj['offline'] else None, f'--label "{ctx.obj["raw_batch_label"]}"' if ctx.obj["raw_batch_label"] != default_batch_label else None, f'--platform "{run_context.platform}"' if run_context.platform != default_platform else None, # TODO: make it customizable in batches f'--type "{run_context.type}"' if run_context.type != default_input_type else None, f'--database "{run_context.database.as_posix()}"' if run_context.database != get_default_database(ctx.obj['inputs_settings']) else None, configuration_cli, f'--tuning-filepath "{tuning_file}"' if tuning_params else None, 'run' if should_run else action_on_existing, f'--input "{run_context.rel_input_path}"', f'--output "{run_context.output_dir}"' if prefix_outputs_path else None, forwarded_args_cli if forwarded_args_cli else None, ] command = ' '.join([arg for arg in args if arg is not None]) click.secho(command, fg='cyan', err=True) click.secho(f" {run_context.output_dir if run_context.output_dir.is_absolute else run_context.output_dir.relative_to(subproject)}", fg='blue', err=True) import re if 'QA_TESTING' in os.environ: # we want to make sure we test the current code command = re.sub('^qa', 'python -m qaboard', command) if str(subproject) != '.': command = f"cd {subproject} && {command}" run_context.command = command run_context.job_options['command_id'] = command_id job = Job(run_context) if should_notify_qa_database and not is_pending: # TODO: accumulate and send all at once to avoid 100s of requests? db_output = notify_qa_database(**{ **ctx.obj, **run_context.obj, # for now we don't want to worry about backward compatibility, and input_path being abs vs relative... 
"is_pending": True, }) if db_output: # Note: the ID is already in the matching job above job.id = db_output["id"] if is_pending: wait_command = f"qa wait --output-id {matching_existing_output['id']}" if action_on_pending=="sync": job.id = matching_existing_output['id'] job.run_context.command = wait_command elif action_on_pending=="wait": job.run_context.command = f"{wait_command} || {job.run_context.command}" else: assert action_on_pending=="continue" jobs.append(job) if list_contexts: print(json.dumps([serialize_paths(j.run_context.asdict()) for j in jobs], indent=2)) return if not dryrun: is_failed = jobs.start( blocking=not no_wait, qa_context=ctx.obj, ) from .gitlab import gitlab_token, update_gitlab_status if gitlab_token and jobs and is_ci and 'QABOARD_TUNING' not in os.environ: update_gitlab_status(commit_id, 'failed' if is_failed else 'success', ctx.obj["batch_label"], f"{len(jobs)} results") if is_failed and not no_wait: del os.environ['QA_BATCH'] # restore verbosity print_url(ctx, status="failure") exit(1)
3,609
def build_varint(val):
    """Build a protobuf varint for the given value"""
    data = []
    while val > 127:
        data.append((val & 127) | 128)
        val >>= 7
    data.append(val)
    return bytes(data)
3,610
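A short sanity check for build_varint above, using the standard protobuf varint encoding of small values (the inputs are chosen purely for illustration):

assert build_varint(1) == b"\x01"        # values below 128 fit in a single byte
assert build_varint(300) == b"\xac\x02"  # low 7 bits (0x2C) | 0x80 continuation, then 0x02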
def show_user_workspace():
    """Shows the path of the user's workspace."""
    click.echo(f'current workspace: {dict_workspace["active_workspace"]}')
3,611
def save_plot_values(temp_arrays, temp_names, out_dir, isParallel=True, saveInTextFormat=True, isDebug=True):
    """ Saves arrays provided in the list in npy format """
    # Return if not master process
    # if isParallel:
    #     if not du.is_master_proc():
    #         return

    for i in range(len(temp_arrays)):
        temp_arrays[i] = np.array(temp_arrays[i])
        temp_dir = out_dir
        # if cfg.TRAIN.TRANSFER_EXP:
        #     temp_dir += os.path.join("transfer_experiment",cfg.MODEL.TRANSFER_MODEL_TYPE+"_depth_"+str(cfg.MODEL.TRANSFER_MODEL_DEPTH))+"/"
        if not os.path.exists(temp_dir):
            os.makedirs(temp_dir)
        if saveInTextFormat:
            # if isDebug: print(f"Saving {temp_names[i]} at {temp_dir+temp_names[i]}.txt in text format!!")
            np.savetxt(temp_dir+'/'+temp_names[i]+".txt", temp_arrays[i], fmt="%1.2f")
        else:
            # if isDebug: print(f"Saving {temp_names[i]} at {temp_dir+temp_names[i]}.npy in numpy format!!")
            np.save(temp_dir+'/'+temp_names[i]+".npy", temp_arrays[i])
3,612
def _register(plugin_identity, type_identity):
    """
    Registers a plug-in as a specific plug-in type.

    This registers the plug-in here in the plug-ins module, and then calls the
    register function of the plug-in type plug-in in case that plug-in wants
    to do additional work when registering a new plug-in.
    :param plugin_identity: The identity of the plug-in to register.
    :param type_identity: The plug-in type with which to register the plug-in.
    """
    if plugin_identity in plugins_by_type[type_identity]:
        api("logger").warning("Couldn't register plug-in {plugin} as type {plugin_type} because it was already registered.",
                              plugin=plugin_identity, plugin_type=type_identity)
        return
    plugins_by_type[type_identity][plugin_identity] = _plugins[plugin_identity]
    try:
        plugin_types[type_identity].register(plugin_identity, _plugins[plugin_identity])
    except Exception as e:
        api("logger").error("Couldn't register plug-in {plugin} as type {plugin_type}: {error_message}",
                            plugin=plugin_identity, plugin_type=type_identity, error_message=str(e))
        del plugins_by_type[type_identity][plugin_identity]
3,613
def python_safe_name(s):
    """
    Return a name derived from string `s` safe to use as a Python function name.

    For example:
    >>> s = "not `\\a /`good` -safe name ??"
    >>> assert python_safe_name(s) == 'not_good_safe_name'
    """
    no_punctuation = re.compile(r'[\W_]', re.MULTILINE).sub
    s = s.lower()
    s = no_punctuation(' ', s)
    s = '_'.join(s.split())
    if py2 and isinstance(s, unicode):
        s = s.encode('ascii', 'ignore')
    return s
3,614
def hash_bytes(hash_type: SupportedHashes, bytes_param: bytes) -> bytes:
    """Hash arbitrary bytes using a supported algo of your choice.

    Args:
        hash_type: SupportedHashes enum type
        bytes_param: bytes to be hashed
    Returns:
        hashed bytes
    """
    hasher = get_hash_obj(hash_type)
    hasher.update(bytes_param)
    return hasher.digest()
3,615
def analyze(osi, num_inc=1, dt=None, dt_min=None, dt_max=None, jd=None):
    """
    Performs an analysis step.

    Returns 0 if successful, and <0 if fail

    Parameters
    ----------
    osi
    num_inc
    dt
    dt_min
    dt_max
    jd

    Returns
    -------
    """
    op_type = 'analyze'
    if dt is None:
        parameters = [int(num_inc)]
    elif dt_min is None:
        parameters = [int(num_inc), float(dt)]
    else:
        parameters = [int(num_inc), float(dt), dt_min, dt_max, jd]
    return osi.to_process(op_type, parameters)
3,616
def load_class(path: str) -> Any:
    """
    Load a class at the provided location. Path is a string of the form:
    path.to.module.class and conform to the python import conventions.

    :param path: string pointing to the class to load
    :return: the requested class object
    """
    try:
        log.info('loading class : [{}]'.format(path))
        module_name, class_name = path.rsplit('.', 1)
        mod = importlib.import_module(module_name)
        return getattr(mod, class_name)
    except Exception:
        raise ProcessingError('Class loading error : expecting path.to.module.ClassName, got : {}'.format(path))
3,617
def dcg_at_k(r, k, method=0):
    """Score is discounted cumulative gain (dcg)

    Relevance is positive real values. Can use binary as the previous methods.
    Example from http://www.stanford.edu/class/cs276/handouts/EvaluationNew-handout-6-per.pdf
    # >>> r = [3, 2, 3, 0, 0, 1, 2, 2, 3, 0]
    # >>> dcg_at_k(r, 1)
    # 3.0
    # >>> dcg_at_k(r, 1, method=1)
    # 3.0
    # >>> dcg_at_k(r, 2)
    # 5.0
    # >>> dcg_at_k(r, 2, method=1)
    # 4.2618595071429155
    # >>> dcg_at_k(r, 10)
    # 9.6051177391888114
    # >>> dcg_at_k(r, 11)
    # 9.6051177391888114

    Args:
        r: Relevance scores (list or numpy) in rank order
            (first element is the first item)
        k: Number of results to consider
        method: If 0 then weights are [1.0, 1.0, 0.6309, 0.5, 0.4307, ...]
                If 1 then weights are [1.0, 0.6309, 0.5, 0.4307, ...]

    Returns:
        Discounted cumulative gain
    """
    r = np.asfarray(r)[:k]
    if r.size:
        if method == 0:
            return r[0] + np.sum(r[1:] / np.log2(np.arange(2, r.size + 1)))
        elif method == 1:
            return np.sum(r / np.log2(np.arange(2, r.size + 2)))
        else:
            raise ValueError('method must be 0 or 1.')
    return 0.
3,618
def main():
    """
    Boggle Game - print out the words in sequences of adjacent letters.
    """
    start = time.time()
    ####################
    boggle_dict = {}
    # User input 4 rows of letters 4*4
    row_1 = input('1 row of letters: ')
    if len(row_1) > 7:
        print("Illegal Input.")
    else:
        row_1 = row_1.lower()
        boggle_dict['0'] = row_1.split()
    row_2 = input('2 row of letters: ')
    if len(row_2) > 7:
        print("Illegal Input.")
    else:
        row_2 = row_2.lower()
        boggle_dict['1'] = row_2.split()
    row_3 = input('3 row of letters: ')
    if len(row_3) > 7:
        print("Illegal Input.")
    else:
        row_3 = row_3.lower()
        boggle_dict['2'] = row_3.split()
    row_4 = input('4 row of letters: ')
    if len(row_4) > 7:
        print("Illegal Input.")
    else:
        row_4 = row_4.lower()
        boggle_dict['3'] = row_4.split()
    read_dictionary()
    find_anagram(boggle_dict)
    print(f'There are {len(find_list)} words in total.')
    ####################
    end = time.time()
    print('----------------------------------')
    print(f'The speed of your boggle algorithm: {end - start} seconds.')
3,619
def metric_section(data_model, metric, level) -> str:
    """Return the metric as Markdown section."""
    markdown = markdown_header(metric["name"], level=level, index=True)
    markdown += markdown_paragraph(metric["description"])
    markdown += definition_list("Default target", metric_target(metric))
    markdown += definition_list("Scales", *metric_scales(metric))
    markdown += definition_list("Default tags", *metric["tags"])
    markdown += "```{admonition} Supporting sources\n"
    for source in metric["sources"]:
        source_name = data_model["sources"][source]["name"]
        default = " (default)" if source == metric.get("default_source", "no default source") else ""
        markdown += f"- [{source_name}]({metric_source_slug(metric['name'], source_name)}){default}\n"
    markdown += "```\n"
    return markdown
3,620
def make_argument_parser():
    """
    Creates an ArgumentParser to read the options for this script from
    sys.argv
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("nifti", help="Nifti file to be processed.")
    parser.add_argument("--out", default=None, help="Output pickle file of roi dict.")
    parser.add_argument("--txt", default=None, help="Readable txt file of rois.")
    return parser
3,621
def get_pkgs(rpmdir):
    """scan a dir of rpms and generate a pkgs structure. first try parsing
    the filename. if that fails, try parsing the rpm headers.
    """
    pkgs = {}
    """
    pkgs structure:
    * pkgs is a dict of package name, rpmblob list pairs:
      pkgs = {name:[rpmblob,rpmblob...], name:[rpmblob,rpmblob...]}
    * rpmblob is a dict describing an rpm file:
      rpmblob = {'file':'foo-0.1-5.i386.rpm', 'name':'foo', 'version':'0.1', 'release':'5', 'subarch':'i386'},

    example:
    pkgs = {
      'foo' : [
        {'file':'foo-0.1-5.i386.rpm', 'name':'foo', 'version':'0.1', 'release':'5', 'subarch':'i386'},
        {'file':'foo-0.2-3.i386.rpm', 'name':'foo', 'version':'0.2', 'release':'3', 'subarch':'i386'}],
      'bar' : [
        {'file':'bar-3.2a-12.mips.rpm', 'name':'bar', 'version':'3.2a', 'release':'12', 'subarch':'mips'},
        {'file':'bar-3.7j-4.mips.rpm', 'name':'bar', 'version':'3.7j', 'release':'4', 'subarch':'mips'}]
    }
    """
    rpms = [item for item in os.listdir(rpmdir) if item.endswith('.rpm')]
    for filename in rpms:
        (name, version, release, subarch) = parse_rpm_filename(rpmdir, filename)
        rpmblob = {'file': filename, 'name': name, 'version': version, 'release': release, 'subarch': subarch}
        if name in pkgs:
            pkgs[name].append(rpmblob)
        else:
            pkgs[name] = [rpmblob]
    return pkgs
3,622
def MAKEFOURCC(ch0: str, ch1: str, ch2: str, ch3: str) -> int:
    """Implementation of Window's `MAKEFOURCC`.

    This is simply just returning the bytes of the joined characters.
    `MAKEFOURCC(*"DX10")` can also be implemented by `Bytes(b"DX10")`.

    Args:
        ch0 (str): First char
        ch1 (str): Second char
        ch2 (str): Third char
        ch3 (str): Fourth char

    Returns:
        int: The integer representation of given characters.

    **Reference**:
    `Microsoft <https://goo.gl/bjtMFA>`__
    """
    return (ord(ch0) << 0) | (ord(ch1) << 8) | (ord(ch2) << 16) | (ord(ch3) << 24)
3,623
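A brief check of MAKEFOURCC above: the shifts amount to a little-endian packing of the four characters, so the result matches int.from_bytes on the same bytes. This is a sketch for illustration, not part of the original module.

assert MAKEFOURCC(*"DX10") == 0x30315844
assert MAKEFOURCC(*"DX10") == int.from_bytes(b"DX10", "little")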
def upload(serial_port, path, eeprom=False, run=True, gpio_pin=-1, progress=do_nothing, terminal=False):
    """Upload file on given serial port and call the progress handler when done.

    Arguments:
        serial_port -- Serial port name in a PySerial compatible format, eg: /dev/ttyUSB0
        path -- File path to Propeller .eeprom or .binary file

    Keyword arguments:
        eeprom -- Boolean. Pass True to upload binary file at path to EEPROM
            otherwise it will be uploaded to RAM only.
        run -- Boolean. Pass True to run the uploaded binary when done.
        progress -- Progress handler, must accept a single message string.
    """
    with Loader(serial_port, gpio_pin) as loader:
        progress("Uploading {}".format(path))
        loader.upload(path=path, eeprom=eeprom, run=run, progress=progress, terminal=terminal)
        progress("Done")
3,624
def sort_configs(configs):  # pylint: disable=R0912
    """Sort configs by global/package/node, then by package name, then by node name

    Attributes:
        configs (list): List of config dicts
    """
    result = []
    # Find all unique keys and sort alphabetically
    _keys = []
    for config in configs:
        if config["key"] not in _keys:
            _keys.append(config["key"])
    _keys = sorted(_keys, key=str.lower)
    # For each key find globals, then packages, then nodes
    for key in _keys:
        _packages = []
        _nodes = []
        for config in configs:
            if config["key"] == key:
                if config["type"] == "global":
                    result.append(config)
                elif config["type"] == "package":
                    _packages.append(config)
                elif config["type"] == "node":
                    _nodes.append(config)
        # Sort the package and node elements alphabetically
        _package_ids = sorted([_package["id"] for _package in _packages], key=str.lower)
        for package in _package_ids:
            for config in configs:
                if config["key"] == key and config["type"] == "package" and config["id"] == package:
                    result.append(config)
                    break
        _node_ids = sorted([_node["id"] for _node in _nodes], key=str.lower)
        for node in _node_ids:
            for config in configs:
                if config["key"] == key and config["type"] == "node" and config["id"] == node:
                    result.append(config)
                    break
    return result
3,625
def read_user_config():
    """Returns keys in lowercase of xlwings.conf in the user's home directory"""
    config = {}
    if Path(xlwings.USER_CONFIG_FILE).is_file():
        with open(xlwings.USER_CONFIG_FILE, "r") as f:
            for line in f:
                values = re.findall(r'"[^"]*"', line)
                if values:
                    config[values[0].strip('"').lower()] = os.path.expandvars(
                        values[1].strip('"')
                    )
    return config
3,626
def MakeTableData( visible_results, starred_items, lower_columns, lower_group_by, users_by_id, cell_factories, id_accessor, related_issues, config, context_for_all_issues=None): """Return a list of list row objects for display by EZT. Args: visible_results: list of artifacts to display on one pagination page. starred_items: list of IDs/names of items in the current project that the signed in user has starred. lower_columns: list of column names to display, all lowercase. These can be combined column names, e.g., 'priority/pri'. lower_group_by: list of column names that define row groups, all lowercase. users_by_id: dict mapping user IDs to UserViews. cell_factories: dict of functions that each create TableCell objects. id_accessor: function that maps from an artifact to the ID/name that might be in the starred items list. related_issues: dict {issue_id: issue} of pre-fetched related issues. config: ProjectIssueConfig PB for the current project. context_for_all_issues: A dictionary of dictionaries containing values passed in to cell factory functions to create TableCells. Dictionary form: {issue_id: {'rank': issue_rank, 'issue_info': info_value, ..}, issue_id: {'rank': issue_rank}, ..} Returns: A list of TableRow objects, one for each visible result. """ table_data = [] group_cell_factories = [ ChooseCellFactory(group.strip('-'), cell_factories, config) for group in lower_group_by] # Make a list of cell factories, one for each column. factories_to_use = [ ChooseCellFactory(col, cell_factories, config) for col in lower_columns] current_group = None for idx, art in enumerate(visible_results): row = MakeRowData( art, lower_columns, users_by_id, factories_to_use, related_issues, config, context_for_all_issues) row.starred = ezt.boolean(id_accessor(art) in starred_items) row.idx = idx # EZT does not have loop counters, so add idx. table_data.append(row) row.group = None # Also include group information for the first row in each group. # TODO(jrobbins): This seems like more overhead than we need for the # common case where no new group heading row is to be inserted. group = MakeRowData( art, [group_name.strip('-') for group_name in lower_group_by], users_by_id, group_cell_factories, related_issues, config, context_for_all_issues) for cell, group_name in zip(group.cells, lower_group_by): cell.group_name = group_name if group == current_group: current_group.rows_in_group += 1 else: row.group = group current_group = group current_group.rows_in_group = 1 return table_data
3,627
def test_atomic_hex_binary_min_length_nistxml_sv_iv_atomic_hex_binary_min_length_1_1(mode, save_output, output_format):
    """
    Type atomic/hexBinary is restricted by facet minLength with value 1.
    """
    assert_bindings(
        schema="nistData/atomic/hexBinary/Schema+Instance/NISTSchema-SV-IV-atomic-hexBinary-minLength-1.xsd",
        instance="nistData/atomic/hexBinary/Schema+Instance/NISTXML-SV-IV-atomic-hexBinary-minLength-1-1.xml",
        class_name="NistschemaSvIvAtomicHexBinaryMinLength1",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )
3,628
def _add_student_submit(behave_sensibly):
    """Allow addition of new students

    Handle both "good" and "bad" versions (to keep code DRY)
    """
    try:
        if behave_sensibly:
            do_add_student_good(
                first_name=request.forms.first_name,
                last_name=request.forms.last_name,
                card_info=request.forms.card_info,
            )
        else:
            do_add_student_bad(
                first_name=request.forms.first_name,
                last_name=request.forms.last_name,
                card_info=request.forms.card_info,
            )
    except psycopg2.DatabaseError:
        pass
    return redirect("/students")
3,629
def test_py2dict():
    """Test UNTL Elements are converted to a dictionary."""
    title = us.Title(qualifier='serialtitle', content='The Bronco')
    name = us.Name(content='Case, J.')
    creator = us.Creator(qualifier='aut')
    creator.add_child(name)
    elements = us.Metadata()
    elements.add_child(title)
    elements.add_child(creator)
    metadata_dict = mg.py2dict(elements)
    assert metadata_dict == {'title': [{'qualifier': 'serialtitle', 'content': 'The Bronco'}],
                             'creator': [{'qualifier': 'aut', 'content': {'name': 'Case, J.'}}]}
3,630
def test_stockwell_api():
    """Test stockwell functions"""
    epochs = Epochs(raw, events,  # XXX pick 2 has epochs of zeros.
                    event_id, tmin, tmax, picks=[0, 1, 3], baseline=(None, 0))
    for fmin, fmax in [(None, 50), (5, 50), (5, None)]:
        with warnings.catch_warnings(record=True):  # zero padding
            power, itc = tfr_stockwell(epochs, fmin=fmin, fmax=fmax,
                                       return_itc=True)
        if fmax is not None:
            assert_true(power.freqs.max() <= fmax)
        with warnings.catch_warnings(record=True):  # padding
            power_evoked = tfr_stockwell(epochs.average(), fmin=fmin,
                                         fmax=fmax, return_itc=False)
        # for multitaper these don't necessarily match, but they seem to
        # for stockwell... if this fails, this maybe could be changed
        # just to check the shape
        assert_array_almost_equal(power_evoked.data, power.data)
        assert_true(isinstance(power, AverageTFR))
        assert_true(isinstance(itc, AverageTFR))
        assert_equal(power.data.shape, itc.data.shape)
        assert_true(itc.data.min() >= 0.0)
        assert_true(itc.data.max() <= 1.0)
        assert_true(np.log(power.data.max()) * 20 <= 0.0)
        assert_true(np.log(power.data.max()) * 20 <= 0.0)
3,631
def _get_table_reference(self, table_id):
    """Constructs a TableReference.

    Args:
        table_id (str): The ID of the table.

    Returns:
        google.cloud.bigquery.table.TableReference:
            A table reference for a table in this dataset.
    """
    return TableReference(self, table_id)
3,632
def request_item_js( request ):
    """ Returns modified javascript file for development.
        Hit by a `dev_josiah_request_item.js` url; production hits the apache-served js file. """
    js_unicode = u''
    current_directory = os.path.dirname(os.path.abspath(__file__))
    js_path = u'%s/lib/josiah_request_item.js' % current_directory
    with open( js_path ) as f:
        js_utf8 = f.read()
        js_unicode = js_utf8.decode( u'utf-8' )
    js_unicode = js_unicode.replace( u'library.brown.edu', request.get_host() )
    return HttpResponse( js_unicode, content_type = u'application/javascript; charset=utf-8' )
3,633
def get_present_types(robots):
    """Get unique set of types present in given list"""
    return {type_char for robot in robots for type_char in robot.type_chars}
3,634
def _deserialize_row(params, mask):
    """
    This is for stochastic vectors where some elements are forced to zero.
    Such a vector is defined by a number of parameters equal to the length
    of the vector minus one and minus the number of elements forced to zero.
    @param params: an array of statistical parameters
    @param mask: bools such that False forces zero probability
    @return: a mask-conformant list of nonnegative floats
    """
    row = np.zeros(mask.shape)
    row[mask] = [1.0] + np.exp(params).tolist()
    row /= row.sum()
    return row
3,635
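A tiny worked example for _deserialize_row above, with made-up inputs: masking out one of three elements leaves a single free parameter, and the first unmasked slot is pinned to 1.0 before normalisation.

import numpy as np

mask = np.array([True, False, True])
params = np.array([0.0])            # len(mask) - 1 - (number of False entries) = 1 parameter
row = _deserialize_row(params, mask)
# row[mask] starts as [1.0, exp(0.0)] = [1.0, 1.0]; after normalisation:
# array([0.5, 0. , 0.5])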
def ruru_old_log_checker(s):
    """
    Return True if the log is in the old format, False otherwise.
    :param s:
    :return:
    """
    time_data_regex = r'[0-9]{4}\/[0-9]{2}\/[0-9]{2}\s[0-9]{2}:[0-9]{2}:[0-9]{2}'
    # In the new ruru server log format the timestamp can be read from the first div with class d12150;
    # in the old format it cannot.
    time_data = re.search(time_data_regex, str(s.find('div', class_='d12150')))
    return False if time_data else True
3,636
def read(file_name, code='utf-8'):
    """
    generator to read a file in with a special delimiter
    :param file_name: path to the file
    :param code: encoding of file (utf-8)
    :return: generator with all parts of file
    """
    with open(file_name, 'r', encoding=code) as file:
        for line in file:
            yield News(line)
        yield ""
3,637
def get_user_map_best(beatmap_id, user, enabled_mods=0):
    """
    gets users best play on map
    :param beatmap_id: beatmap id
    :param user: username
    :param enabled_mods: mods used
    :return: list of plays
    """
    response = OSU_API.get('/get_scores', {"b": beatmap_id, "u": user, "mods": enabled_mods}).json()
    if Config.debug:
        Log.log(response)
    # if len(response) == 0:
    #     raise NoScore("Couldn't find user score for this beatmap")
    for i, j in enumerate(response):
        response[i] = Play(j)
        response[i].beatmap_id = beatmap_id
    return response
3,638
def preprocess(sc, inputDir, file_format, outputDir):
    """
    this method just reads the offer file and creates vertexrdd and edgerdd required for graphx
    vertexrdd will be node uri and type
    edgesrdd will be node a, node b, edge type
    :param inputDir:
    :param file_format:
    :return:
    """
    fileUtil = FileUtil(sc)
    inputrdd = fileUtil.load_file(inputDir, file_format=file_format, data_type='json')
    vertexrdd = inputrdd.flatMapValues(lambda x: nodes_mapper(x))
    # rdd = vertexrdd.foreach(lambda (x, y): f(x, y))
    edgerdd = inputrdd.flatMapValues(lambda x: edges_mapper(x))
    fileUtil.save_file(vertexrdd, outputDir + 'vertex', file_format='text', data_type='json')
    fileUtil.save_file(edgerdd, outputDir + 'edges', file_format='text', data_type='json')
3,639
def scale_z_by_atom(z, scale, copy=True):
    """
    Parameters
    ----------
    z_ : array, shape (n_trials, n_atoms, n_times - n_times_atom + 1)
        Can also be a list of n_trials LIL-sparse matrix of shape
        (n_atoms, n_times - n_times_atom + 1)
        The sparse activation matrix.
    scale : array, shape = (n_atoms, )
        The scales to apply on z.
    """
    if is_list_of_lil(z):
        n_trials, n_atoms, n_times_valid = get_z_shape(z)
        assert n_atoms == len(scale)
        if copy:
            z = deepcopy(z)
        for z_i in z:
            for k in range(z_i.shape[0]):
                z_i.data[k] = [zikt * scale[k] for zikt in z_i.data[k]]
    else:
        if copy:
            z = z.copy()
        z *= scale[None, :, None]
    return z
3,640
def test_handle_time_limits(generate_workchain_base, generate_remote_data, generate_retrieved_data):
    """Test `FleurBaseWorkChain._handle_time_limits`."""
    from aiida.common import LinkType

    process = generate_workchain_base(exit_code=FleurCalculation.exit_codes.ERROR_TIME_LIMIT)
    process.setup()
    process.validate_inputs()  # Sets up all the context in order for the memory error handler to work
    code = process.ctx.inputs.code

    # Add outgoing remote folder
    process.ctx.children[-1].store()
    remote = generate_remote_data(code.computer, '/tmp')
    remote.add_incoming(process.ctx.children[-1], link_type=LinkType.CREATE, link_label='remote_folder')
    remote.store()
    generate_retrieved_data(process.ctx.children[-1], 'default')

    result = process._handle_time_limits(process.ctx.children[-1])

    assert isinstance(result, ProcessHandlerReport)
    assert result.do_break
    assert result.exit_code.status == 0
    assert process.ctx.inputs.metadata.options['max_wallclock_seconds'] == 12 * 60 * 60
    assert process.ctx.num_machines == 2
    assert process.ctx.inputs.parent_folder.uuid == remote.uuid
    assert 'fleurinpdata' not in process.ctx.inputs

    process.ctx.inputs.metadata.options['max_wallclock_seconds'] = 80000  # doubling goes over the maximum specified
    process.ctx.num_machines = 14  # doubling goes over the maximum specified

    result = process.inspect_process()

    assert result.status == 0
    assert process.ctx.inputs.metadata.options['max_wallclock_seconds'] == 86400
    assert process.ctx.num_machines == 20
    assert process.ctx.inputs.parent_folder.uuid == remote.uuid
    assert 'fleurinpdata' not in process.ctx.inputs
3,641
def index():
    """
    example action using the internationalization operator T and flash
    rendered by views/default/index.html or views/generic.html

    if you need a simple wiki simply replace the two lines below with:
    return auth.wiki()
    """
    if auth.is_logged_in():
        # if newly registered user is not in auth_membership add him as an administrator
        if not db(db.auth_membership.user_id == auth.user_id).count() > 0:
            auth.add_membership(auth.id_group(ADMIN), auth.user_id)
    session.user_info = get_user_info()
    response.user_info = session.user_info
    if request.user_agent().is_mobile:
        return response.render('../views/default/index-m.html')
    else:
        return response.render('../views/default/index.html')
3,642
def test_ct_i026_ct_i026_v(mode, save_output, output_format):
    """
    TEST :Syntax Checking for top level complexType Declaration : schema
    with finalDefault = 'restriction extension' and final='restriction',
    derived complexType by extension
    """
    assert_bindings(
        schema="msData/complexType/ctI026.xsd",
        instance="msData/complexType/ctI026.xml",
        class_name="Root",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )
3,643
def compute_classification_metrics_at_ks(is_match, num_predictions, num_trgs, k_list=[5,10], meng_rui_precision=False): """ :param is_match: a boolean np array with size [num_predictions] :param predicted_list: :param true_list: :param topk: :return: {'precision@%d' % topk: precision_k, 'recall@%d' % topk: recall_k, 'f1_score@%d' % topk: f1, 'num_matches@%d': num_matches} """ assert is_match.shape[0] == num_predictions #topk.sort() if num_predictions == 0: precision_ks = [0] * len(k_list) recall_ks = [0] * len(k_list) f1_ks = [0] * len(k_list) num_matches_ks = [0] * len(k_list) num_predictions_ks = [0] * len(k_list) else: num_matches = np.cumsum(is_match) num_predictions_ks = [] num_matches_ks = [] precision_ks = [] recall_ks = [] f1_ks = [] for topk in k_list: if topk == 'M': topk = num_predictions elif topk == 'G': #topk = num_trgs if num_predictions < num_trgs: topk = num_trgs else: topk = num_predictions if meng_rui_precision: if num_predictions > topk: num_matches_at_k = num_matches[topk-1] num_predictions_at_k = topk else: num_matches_at_k = num_matches[-1] num_predictions_at_k = num_predictions else: if num_predictions > topk: num_matches_at_k = num_matches[topk - 1] else: num_matches_at_k = num_matches[-1] num_predictions_at_k = topk precision_k, recall_k, f1_k = compute_classification_metrics(num_matches_at_k, num_predictions_at_k, num_trgs) precision_ks.append(precision_k) recall_ks.append(recall_k) f1_ks.append(f1_k) num_matches_ks.append(num_matches_at_k) num_predictions_ks.append(num_predictions_at_k) return precision_ks, recall_ks, f1_ks, num_matches_ks, num_predictions_ks
3,644
def coordinatesOfPosition(shape, distance):
    """Compute the point at a given distance from the beginning of a shape.

    The shape is a list of points. A point is a sequence of two floats.
    The returned point is the x- and y-coordinate of the point that has the
    given distance along the line of the shape from its starting point.
    The shape must contain at least one point coordinate.
    If the distance argument is larger than the length of the shape, the
    last point of the shape is returned.
    """
    prevPoint = shape[0]
    currentDistance = 0.0
    for point in shape:
        diffX = point[0] - prevPoint[0]
        diffY = point[1] - prevPoint[1]
        sectionLength = math.sqrt(diffX * diffX + diffY * diffY)
        if currentDistance + sectionLength > distance:
            fraction = (distance - currentDistance) / sectionLength
            return (prevPoint[0] + diffX * fraction,
                    prevPoint[1] + diffY * fraction)
        currentDistance += sectionLength
        prevPoint = point  # advance to the start of the next segment
    if cliArgs.debug:
        print("coordinatesOfPosition: Exceeded the shape.")
    return point
3,645
def compute_dose_median_scores(null_dist_medians, dose_list):
    """
    Align median scores per dose. This function returns a dictionary
    with keys as dose numbers and values as all median scores for each dose.
    """
    median_scores_per_dose = {}
    for dose in dose_list:
        median_list = []
        for keys in null_dist_medians:
            dose_median_list = null_dist_medians[keys][dose - 1]
            median_list += dose_median_list
        median_scores_per_dose[dose] = median_list
    return median_scores_per_dose
3,646
def generate_converter(name, taskdep, **options):
    """
    taskdep is the task(s) that should run before this one.
    task_html_generator is the task that produces the html; we need to extract its result.
    taskname is the name of the generated task.
    """
    converter = options.get('converter', Pandoc("-f", "html", "-t", "markdown", "--wrap=none"))
    flowdep = options.get('flowdep', taskdep[0])
    return lift_process_to_task(name, converter, taskdep, flowdep=flowdep)
3,647
def log_to_tensorboard(writer, step, prefix, loss):
    """
    Log metrics to Tensorboard.
    """
    log_generic_to_tensorboard(writer, step, prefix, "loss", loss)
3,648
def save_model_all(model, save_dir, model_name, epoch):
    """
    :param model:
    :param save_dir:
    :param model_name:
    :param epoch:
    :return:
    """
    if not os.path.isdir(save_dir):
        os.makedirs(save_dir)
    save_prefix = os.path.join(save_dir, model_name)
    save_path = '{}_epoch_{}.pt'.format(save_prefix, epoch)
    print("save all model to {}".format(save_path))
    output = open(save_path, mode="wb")
    torch.save(model.state_dict(), output)
    output.close()
3,649
def symmetric_product(tensor):
    """
    Symmetric outer product of tensor
    """
    shape = tensor.size()
    idx = list(range(len(shape)))
    idx[-1], idx[-2] = idx[-2], idx[-1]
    return 0.5 * (tensor + tensor.permute(*idx))
3,650
def prep_image(img, inp_dim):
    """
    Function:
        Prepare image for inputting to the neural network.

    Arguments:
        img -- image itself
        inp_dim -- dimension for resize the image (input dimension)

    Return:
        img -- image after preparing
    """
    img = (letterbox_image(img, (inp_dim, inp_dim)))
    img = img[:, :, ::-1].transpose((2, 0, 1)).copy()
    img = torch.from_numpy(img).float().div(255.0).unsqueeze(0)
    return img
3,651
def display_similarity_matches(img, segm, patchSize, nbBins, classifier, axis_in_degrees=None): """Display the map of similar and non similar matches over the original image thanks to respectively green and red circles. # Arguments : im: The image whose textures symmetry has been evaluated. segm: The corresponding segmented image. preds: The predictions given by the `symmetryTexturePred()` function. points: The list of points correspondind to used patches in the image (`textureDataExtractor()` function). reference: The part of the image taken as a reference ("Upper" or "Lower") (`textureDataExtractor()` function). axis_in_degrees: rotation to apply to image before analysis. # Outputs : Display the map of similarity. """ # Rotate images if axis_in_degrees: # Compute center of mass segm = img_as_ubyte(segm / 255) properties = regionprops(segm) centroid = properties[0].centroid img = rotate(img, angle=axis_in_degrees, center=centroid, mode='symmetric') segm = rotate(segm, angle=axis_in_degrees, center=centroid) # Crop images to be centered on the lesion blkSeg = np.zeros((np.shape(segm)[0] + 2, np.shape(segm)[1] + 2)) blkSeg[1:np.shape(blkSeg)[0] - 1, 1:np.shape(blkSeg)[1] - 1] = segm segm = blkSeg contour = find_contours(segm, 0) cnt = contour[0] minx = min(cnt[:, 1]) maxx = max(cnt[:, 1]) miny = min(cnt[:, 0]) maxy = max(cnt[:, 0]) segm = segm[max(0, int(miny) - 1):int(maxy) + 1, max(0, int(minx) - 1):int(maxx) + 1] img = img[max(0, int(miny) - 1):int(maxy), max(0, int(minx) - 1):int(maxx) + 1] fig, axs = plt.subplots(1, 2, sharex=True, sharey=True) fig.suptitle("Texture Symmetry", fontsize=20) axs[0].axis('off') axs[0].imshow(img, cmap=plt.cm.gray) axs[0].set_title('Rotated and cropped') draw_similarity_matches(axs[1], img, segm, patchSize, nbBins, classifier) axs[1].set_title('Similar (green) and different (Red) patches') plt.show()
3,652
async def auth_check(request):
    """
    No-op view to set the session cookie, this is used by websocket since
    the "Set-Cookie" header doesn't work with 101 upgrade
    """
    return json_response(status='ok')
3,653
def check_constraint(term_freq, top_terms, top_terms_test_freq):
    """
    Check the constraint 12%-30% for the test set

    term_freq is the dictionary of all term frequencies
    top_terms is the list of terms we care about (first 300?)
    top_terms_test_freq is an array of frequency of top terms in test set.
    RETURN True if constraint satisfied, False otherwise
    """
    return (check_constraint_12pc(term_freq, top_terms, top_terms_test_freq)
            and check_constraint_30pc(term_freq, top_terms, top_terms_test_freq))
3,654
def create_LSTM_model(patient_idx, time_steps, save_model=False, plot_loss=False):
    """
    Trains an LSTM model over a patient
    @param patient_idx: number
    @param time_steps: number of concatenated heartbeats per datapoint
    @param save_model: whether to save the model to h5 file
    @param plot_loss: whether to plot the loss during training
    @return:
    """
    orig_data = np.load(os.path.join("Working_Data/Normalized_Fixed_Dim_HBs_Idx" + str(patient_idx) + ".npy"))
    data = orig_data[0:1000, :, :]
    # print(data[0:10].reshape(10000,4).shape)
    X, y = create_lstm_datapoints(data, time_steps)

    model = Sequential()
    model.add(LSTM(30, input_shape=(X.shape[1], X.shape[2])))
    model.add(Dropout(rate=0.2))
    model.add(RepeatVector(X.shape[1]))
    model.add(LSTM(30, return_sequences=True))
    model.add(Dropout(rate=0.2))
    model.add(TimeDistributed(Dense(X.shape[2])))
    model.compile(optimizer='adam', loss='mse')
    model.summary()

    history = model.fit(X, X, epochs=100, batch_size=1, validation_split=0.1,
                        callbacks=[keras.callbacks.EarlyStopping(monitor='loss', patience=3, mode='min')],
                        shuffle=False)

    if save_model:
        model.save(f"Working_Data/LSTM_Model_Idx{patient_idx}.h5")

    if plot_loss:
        # plot the loss
        plt.plot(history.history['loss'])
        plt.plot(history.history['val_loss'])
        plt.title('model loss')
        plt.ylabel('loss')
        plt.xlabel('epoch')
        plt.legend(['train', 'test'], loc='upper left')
        plt.savefig("Working_Data/lstm_loss.png")
        plt.show()
        print("loss of the model is: ")
        print(history.history['loss'])

    print(f"Created LSTM model for patient {patient_idx}")
    return model
3,655
def get_supported_providers() -> list[str]:
    """
    Return the list of supported discussion providers

    TODO: Load this from entry points?
    """
    providers = [
        'legacy',
        'piazza',
    ]
    return providers
3,656
def predict_split(history, prediction_length=7*24, hyperparameters={}):
    """
    This function predicts a time series of gas prices by splitting it into a trend
    and a residual and then applying a feature pipeline and predicting each of them individually.

    Keyword arguments:
        history -- the time series to split up
        prediction_length -- the number of time steps to predict (default 7*24)
        hyperparameters -- values used for the prediction model (default {})

    Return value:
        2 time series predicted: trend and residual
    """
    # extract parameters
    r = hyperparameters["r"] if "r" in hyperparameters else default_hyperparameters["r"]
    # split data
    trend, res = split_trend(history)
    # create index for prediction time series
    index_pred = pd.date_range(
        start=history.index.max() + timedelta(hours=1),
        end=history.index.max() + timedelta(hours=prediction_length),
        freq="1H",
        tz=pytz.utc
    )
    # predict the trend
    trend_pred = predict_ts(
        (trend - trend.shift(1)).fillna(0.),
        get_feature_pipeline("trend", hyperparameters),
        index_pred,
        hyperparameters=hyperparameters
    ).cumsum() + trend.iloc[-1]
    # compute residual prediction
    res_pred = predict_ts(
        res.iloc[-r:],
        get_feature_pipeline("res", hyperparameters),
        index_pred,
        hyperparameters=hyperparameters
    )
    # alternative: using AR from statsmodels
    # res_model = AR(res)
    # res_results = res_model.fit(disp=-1, maxlag=p)
    # res_pred = res_results.predict(len(res), len(res) + prediction_length)

    # return result
    return trend_pred, res_pred
3,657
def SFRfromLFIR(LFIR):
    """
    Kennicut 1998
    To get Star formation rate from LFIR (8-1000um)
    LFIR in erg s-1
    SFR in Msun / year
    """
    SFR = 4.5E-44 * LFIR
    return SFR
3,658
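A one-line worked example for SFRfromLFIR above, with an illustrative number: an infrared luminosity of 1e44 erg s-1 maps to 4.5 Msun per year under the linear calibration used here.

SFRfromLFIR(1e44)   # 4.5E-44 * 1e44 = 4.5 (Msun / year)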
def test_valid_remote_your_discr_1(control, valid_data, mocker):  # noqa: F811
    """Inject a valid packet and monitor the log"""
    valid_data['your_discr'] = control.sessions[0].local_discr
    packet = bitstring.pack(PACKET_FORMAT, **valid_data)
    mocker.patch('aiobfd.control.log')
    control.process_packet(packet, '127.0.0.1')
    aiobfd.control.log.info.assert_not_called()
    aiobfd.control.log.warning.assert_not_called()
    aiobfd.control.log.debug.assert_not_called()
    control.sessions[0]._tx_packets.cancel()
3,659
def save_element_as_file(element, filename, height, width):
    """
    Saves any element as an image file. Element needs to have an underlying
    Widget available (almost if not all of them do)
    :param element: The element to save
    :param filename: The filename to save to. The extension of the filename determines the format (jpg, png, gif, ?)
    """
    widget = element.Widget
    box = (widget.winfo_rootx(), widget.winfo_rooty(),
           widget.winfo_rootx() + widget.winfo_width() - (257 - width),
           widget.winfo_rooty() + widget.winfo_height() - (257 - height))
    grab = ImageGrab.grab(bbox=box)
    grab.save(filename)
3,660
def inc_group_layers(n_list, d_list, c_list): """ Helper function for inc_tmm. Groups and sorts layer information. See coh_tmm for definitions of n_list, d_list. c_list is "coherency list". Each entry should be 'i' for incoherent or 'c' for 'coherent'. A "stack" is a group of one or more consecutive coherent layers. A "stack index" labels the stacks 0,1,2,.... The "within-stack index" counts the coherent layers within the stack 1,2,3... [index 0 is the incoherent layer before the stack starts] An "incoherent layer index" labels the incoherent layers 0,1,2,... An "alllayer index" labels all layers (all elements of d_list) 0,1,2,... Returns info about how the layers relate: * stack_d_list[i] = list of thicknesses of each coherent layer in the i'th stack, plus starting and ending with "inf" * stack_n_list[i] = list of refractive index of each coherent layer in the i'th stack, plus the two surrounding incoherent layers * all_from_inc[i] = j means that the layer with incoherent index i has alllayer index j * inc_from_all[i] = j means that the layer with alllayer index i has incoherent index j. If j = nan then the layer is coherent. * all_from_stack[i1][i2] = j means that the layer with stack index i1 and within-stack index i2 has alllayer index j * stack_from_all[i] = [j1 j2] means that the layer with alllayer index i is part of stack j1 with withinstack-index j2. If stack_from_all[i] = nan then the layer is incoherent * inc_from_stack[i] = j means that the i'th stack comes after the layer with incoherent index j, and before the layer with incoherent index j+1. * stack_from_inc[i] = j means that the layer with incoherent index i comes immediately after the j'th stack. If j=nan, it is not immediately following a stack. * num_stacks = number of stacks * num_inc_layers = number of incoherent layers * num_layers = number of layers total """ if (d_list.ndim != 1): raise ValueError("Problem with n_list or d_list!") if (d_list[0] != np.inf) or (d_list[-1] != np.inf): raise ValueError('d_list must start and end with inf!') if (c_list[0] != 'i') or (c_list[-1] != 'i'): raise ValueError('c_list should start and end with "i"') if not len(n_list) == d_list.size == len(c_list): raise ValueError('List sizes do not match!') inc_index = 0 stack_index = 0 stack_d_list = [] stack_n_list = [] all_from_inc = [] inc_from_all = [] all_from_stack = [] stack_from_all = [] inc_from_stack = [] stack_from_inc = [] stack_in_progress = False for alllayer_index in range(len(n_list)): if c_list[alllayer_index] == 'c': # coherent layer inc_from_all.append(np.nan) if not stack_in_progress: # this layer is starting new stack stack_in_progress = True ongoing_stack_d_list = [np.inf, d_list[alllayer_index]] ongoing_stack_n_list = [n_list[alllayer_index - 1], n_list[alllayer_index]] stack_from_all.append([stack_index, 1]) all_from_stack.append([alllayer_index - 1, alllayer_index]) inc_from_stack.append(inc_index - 1) within_stack_index = 1 else: # another coherent layer in the same stack ongoing_stack_d_list.append(d_list[alllayer_index]) ongoing_stack_n_list.append(n_list[alllayer_index]) within_stack_index += 1 stack_from_all.append([stack_index, within_stack_index]) all_from_stack[-1].append(alllayer_index) elif c_list[alllayer_index] == 'i': # incoherent layer stack_from_all.append(np.nan) inc_from_all.append(inc_index) all_from_inc.append(alllayer_index) if not stack_in_progress: # previous layer was also incoherent stack_from_inc.append(np.nan) else: # previous layer was coherent stack_in_progress = False 
stack_from_inc.append(stack_index) ongoing_stack_d_list.append(np.inf) stack_d_list.append(ongoing_stack_d_list) ongoing_stack_n_list.append(n_list[alllayer_index]) stack_n_list.append(ongoing_stack_n_list) all_from_stack[-1].append(alllayer_index) stack_index += 1 inc_index += 1 else: raise ValueError("Error: c_list entries must be 'i' or 'c'!") return {'stack_d_list': stack_d_list, 'stack_n_list': stack_n_list, 'all_from_inc': all_from_inc, 'inc_from_all': inc_from_all, 'all_from_stack': all_from_stack, 'stack_from_all': stack_from_all, 'inc_from_stack': inc_from_stack, 'stack_from_inc': stack_from_inc, 'num_stacks': len(all_from_stack), 'num_inc_layers': len(all_from_inc), 'num_layers': len(n_list)}
3,661
def get_heater_device_json():
    """ returns information about the heater in json """
    return ('{\n "state" : "' + _pretty_state_identifier(brew_logic.heater_state)
            + '",\n "overridden" : "' + str(brew_logic.heater_override).lower()
            + '"\n }')
3,662
def rfc_deploy(): """This function trains a Random Forest classifier and outputs the out-of-sample performance from the validation and test sets """ df = pd.DataFrame() for pair in pairs: # retrieving the data and preparing the features dataset = gen_feat(pair) dataset.drop(['Open', 'High', 'Low', 'Close', 'volume'], axis=1, inplace=True) # selecting the features to train on cols = list(dataset.columns) feats = cols[2:] #splitting into training, validation and test sets df_train = dataset.iloc[:-100,:] train = df_train.copy() df_test = dataset.iloc[-100:,:] test = df_test.copy() train_f = train.iloc[:-100,:] valid = train.iloc[-100:,:] #training the algorithm m = rfc(train_f[feats], train_f['dir']) # test sets test_pred = m.predict(test[feats]) test_proba = m.predict_proba(test[feats]) df1 = pd.DataFrame(test_pred,columns=['prediction'], index=test.index) proba_short = [] proba_long = [] for x in range(len(test_proba)): proba_short.append(test_proba[x][0]) proba_long.append(test_proba[x][-1]) proba = {'proba_short': proba_short, 'proba_long': proba_long} df2 = pd.DataFrame(proba, index=test.index) df1['probability'] = np.where(df1['prediction'] == 1, df2['proba_long'], np.where(df1['prediction'] == -1, df2['proba_short'], 0)) df1['signal'] = np.where((df1['probability'] >= .7) & (df1['prediction'] == 1), 'Go Long', np.where((df1['probability'] >= 0.7) & (df1['prediction'] == -1), 'Go Short', 'Stand Aside')) df1.reset_index(inplace=True) df1['pair'] = pair df1.set_index('pair', inplace=True) entry_sig = df1[['probability', 'signal']].iloc[-1:] # Merge df = pd.concat([df, entry_sig], axis=0) #output return df
3,663
def test_log_two_tasks():
    """ Test tailing a single file on two separate tasks """
    returncode, stdout, stderr = exec_command(
        ['dcos', 'task', 'log', 'test-app'])

    assert returncode == 0
    assert stderr == b''

    lines = stdout.decode('utf-8').split('\n')
    assert len(lines) == 11
    assert re.match('===>.*<===', lines[0])
    assert re.match('===>.*<===', lines[5])
3,664
def get_block(block_name):
    """Get block from BLOCK_REGISTRY based on block_name."""
    if block_name not in BLOCK_REGISTRY:
        raise Exception(NO_BLOCK_ERR.format(
            block_name, BLOCK_REGISTRY.keys()))
    block = BLOCK_REGISTRY[block_name]
    return block
3,665
def task4_a2():
    """
    Write a program to copy its input to its output, replacing each string
    of one or more blanks by a single blank.
    """
    s = input('Enter your input: ')
    print(' '.join(s.split()))
3,666
def _determine_role_name(var_file: Path) -> str:
    """
    Lookup role name from directory or galaxy_info.
    """
    if var_file.is_file():
        role_path: Path = var_file.parent / ".."
        name = str(role_path.resolve().name)

        meta_path: Path = role_path / 'meta' / 'main.yml'
        if (meta_path.is_file()):
            with open(str(meta_path), 'r') as f:
                meta = yaml.load(f, Loader=SafeLoader)
                try:
                    role_name = meta['galaxy_info']['role_name']
                    name = role_name
                except BaseException:
                    pass
        return name
3,667
def load_table(file_path, metadata_ext='.pklmetadata'): """ Loads a pickled DataFrame from a file along with its metadata. This function loads a DataFrame from a file stored in pickle format. Further, this function looks for a metadata file with the same file name but with an extension given by the user (defaults to '.pklmetadata'. If the metadata file is present, the function will update the metadata for that DataFrame in the catalog. Args: file_path (string): The file path to load the file from. metadata_ext (string): The metadata file extension (defaults to '.pklmetadata') that should be used to generate metadata file name. Returns: If the loading is successful, the function will return a pandas DataFrame read from the file. The catalog will be updated with the metadata read from the metadata file (if the file was present). Raises: AssertionError: If `file_path` is not of type string. AssertionError: If `metadata_ext` is not of type string. Examples: >>> A = em.load_table('./A.pkl') >>> A = em.load_table('./A.pkl', metadata_ext='.pklmeta') See Also: :meth:`~py_entitymatching.save_table` Note: This function is different from read_csv_metadata in two aspects. First, this function currently does not support reading in candidate set tables, where there are more metadata such as ltable, rtable than just 'key', and conceptually the user is expected to provide ltable and rtable information while calling this function. ( this support will be added shortly). Second, this function loads the table stored in a pickle format. """ # Validate input parameters validate_object_type(file_path, six.string_types, error_prefix='Input file path') validate_object_type(metadata_ext, six.string_types) # Load the object from the file path. Note that we use a generic load # object to load in the DataFrame too. data_frame = load_object(file_path) # Load metadata from file path # # Check if the meta data file is present if ps._is_metadata_file_present(file_path, extension=metadata_ext): # Construct the metadata file name, and read it from the disk. # # Get the file name used to load the DataFrame file_name, _ = os.path.splitext(file_path) # # Construct the metadata file name metadata_filename = file_name + metadata_ext # # Load the metadata from the disk metadata_dict = load_object(metadata_filename) # Update the catalog with the properties read from the disk for property_name, property_value in six.iteritems(metadata_dict): if property_name == 'key': # If the property_name is key call set_key as the function # will check for the integrity of key before setting it in # the catalog cm.set_key(data_frame, property_value) else: cm.set_property(data_frame, property_name, property_value) else: # If the metadata file is not present then issue a warning logger.warning('There is no metadata file') # Return the DataFrame return data_frame
3,668
def main() -> int:
    """Ensure runtime environment is ready, and start the server."""
    app.utils.setup_runtime_environment()

    for safety_check in (
        app.utils.ensure_supported_platform,  # linux only at the moment
        app.utils.ensure_local_services_are_running,  # mysql (if local)
        app.utils.ensure_directory_structure,  # .data/ & achievements/ dir structure
        app.utils.ensure_dependencies_and_requirements,  # submodules & oppai-ng built
    ):
        if (exit_code := safety_check()) != 0:
            return exit_code

    # Server should be safe to start

    # install any debugging hooks from
    # _testing/runtime.py, if present
    app.utils._install_debugging_hooks()

    # check our internet connection status
    if not app.utils.check_connection(timeout=1.5):
        log("No internet connection available.", Ansi.LYELLOW)

    # show info & any contextual warnings.
    app.utils.display_startup_dialog()

    # the server supports both inet and unix sockets.
    if (
        app.utils.is_valid_inet_address(app.settings.SERVER_ADDR)
        and app.settings.SERVER_PORT is not None
    ):
        server_arguments = {
            "host": app.settings.SERVER_ADDR,
            "port": app.settings.SERVER_PORT,
        }
    elif (
        app.utils.is_valid_unix_address(app.settings.SERVER_ADDR)
        and app.settings.SERVER_PORT is None
    ):
        server_arguments = {
            "uds": app.settings.SERVER_ADDR,
        }

        # make sure the socket file does not exist on disk and can be bound
        # (uvicorn currently does not do this for us, and will raise an exc)
        if os.path.exists(app.settings.SERVER_ADDR):
            if (
                app.utils.processes_listening_on_unix_socket(app.settings.SERVER_ADDR)
                != 0
            ):
                log(
                    f"There are other processes listening on {app.settings.SERVER_ADDR}.\n"
                    f"If you've lost it, gulag can be killed gracefully with SIGINT.",
                    Ansi.LRED,
                )
                return 1
            else:
                os.remove(app.settings.SERVER_ADDR)
    else:
        raise ValueError(
            "%r does not appear to be an IPv4, IPv6 or Unix address"
            % app.settings.SERVER_ADDR,
        ) from None

    # run the server indefinitely
    uvicorn.run(
        "app.api.init_api:asgi_app",
        reload=app.settings.DEBUG,
        log_level=logging.WARNING,
        server_header=False,
        date_header=False,
        # TODO: uvicorn calls .lower() on the key & value,
        # but i would prefer Gulag-Version to keep
        # with standards. perhaps look into this.
        headers=(("gulag-version", app.settings.VERSION),),
        **server_arguments,
    )

    return 0
3,669
def coco17_category_info(with_background=True):
    """
    Get class id to category id map and category id
    to category name map of COCO2017 dataset

    Args:
        with_background (bool, default True):
            whether load background as class 0.
    """
    clsid2catid = {
        1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 6, 7: 7, 8: 8, 9: 9, 10: 10,
        11: 11, 12: 13, 13: 14, 14: 15, 15: 16, 16: 17, 17: 18, 18: 19,
        19: 20, 20: 21, 21: 22, 22: 23, 23: 24, 24: 25, 25: 27, 26: 28,
        27: 31, 28: 32, 29: 33, 30: 34, 31: 35, 32: 36, 33: 37, 34: 38,
        35: 39, 36: 40, 37: 41, 38: 42, 39: 43, 40: 44, 41: 46, 42: 47,
        43: 48, 44: 49, 45: 50, 46: 51, 47: 52, 48: 53, 49: 54, 50: 55,
        51: 56, 52: 57, 53: 58, 54: 59, 55: 60, 56: 61, 57: 62, 58: 63,
        59: 64, 60: 65, 61: 67, 62: 70, 63: 72, 64: 73, 65: 74, 66: 75,
        67: 76, 68: 77, 69: 78, 70: 79, 71: 80, 72: 81, 73: 82, 74: 84,
        75: 85, 76: 86, 77: 87, 78: 88, 79: 89, 80: 90
    }

    catid2name = {
        0: 'background', 1: 'person', 2: 'bicycle', 3: 'car', 4: 'motorcycle',
        5: 'airplane', 6: 'bus', 7: 'train', 8: 'truck', 9: 'boat',
        10: 'traffic light', 11: 'fire hydrant', 13: 'stop sign',
        14: 'parking meter', 15: 'bench', 16: 'bird', 17: 'cat', 18: 'dog',
        19: 'horse', 20: 'sheep', 21: 'cow', 22: 'elephant', 23: 'bear',
        24: 'zebra', 25: 'giraffe', 27: 'backpack', 28: 'umbrella',
        31: 'handbag', 32: 'tie', 33: 'suitcase', 34: 'frisbee', 35: 'skis',
        36: 'snowboard', 37: 'sports ball', 38: 'kite', 39: 'baseball bat',
        40: 'baseball glove', 41: 'skateboard', 42: 'surfboard',
        43: 'tennis racket', 44: 'bottle', 46: 'wine glass', 47: 'cup',
        48: 'fork', 49: 'knife', 50: 'spoon', 51: 'bowl', 52: 'banana',
        53: 'apple', 54: 'sandwich', 55: 'orange', 56: 'broccoli',
        57: 'carrot', 58: 'hot dog', 59: 'pizza', 60: 'donut', 61: 'cake',
        62: 'chair', 63: 'couch', 64: 'potted plant', 65: 'bed',
        67: 'dining table', 70: 'toilet', 72: 'tv', 73: 'laptop', 74: 'mouse',
        75: 'remote', 76: 'keyboard', 77: 'cell phone', 78: 'microwave',
        79: 'oven', 80: 'toaster', 81: 'sink', 82: 'refrigerator', 84: 'book',
        85: 'clock', 86: 'vase', 87: 'scissors', 88: 'teddy bear',
        89: 'hair drier', 90: 'toothbrush'
    }

    if not with_background:
        clsid2catid = {k - 1: v for k, v in clsid2catid.items()}

    return clsid2catid, catid2name
3,670
def test_app_version(testing_defaults):
    """Test app_version default"""
    assert testing_defaults.info["app_version"] == '4.2.6'
3,671
def _parse_parameters(paramdoc):
    """Parse parameters and return list of (name, full_doc_string)

    It is needed to remove multiple entries for the same parameter,
    as could happen when adding parameters from the parent class.

    It assumes that the parameters were previously unwrapped, so their
    documentation starts at the beginning of the string, as it should be
    after _split_out_parameters
    """
    entries = __re_spliter1.split(paramdoc)
    result = [(__re_spliter2.split(e)[0].strip(), e)
              for e in entries if e != '']
    if __debug__:
        debug('DOCH',
              'parseParameters: Given "%s", we split into %s'
              % (paramdoc, result))
    return result
3,672
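A hedged usage sketch for _parse_parameters. The module-level regexes __re_spliter1 and __re_spliter2 are not shown in the snippet above, so the stand-ins below are only assumptions about their behaviour (split at the start of each entry, then take the first token as the name):

import re

# Hypothetical stand-ins for the module-level regexes; the real patterns may differ.
__re_spliter1 = re.compile(r'\n(?=\S)')   # split where a new, non-indented entry begins
__re_spliter2 = re.compile(r'[\s:]')      # name = text up to the first whitespace/colon

paramdoc = (
    "x : int\n  The first operand.\n"
    "y : int\n  The second operand.\n"
    "x : int\n  Duplicate entry inherited from the parent class.\n"
)
for name, doc in _parse_parameters(paramdoc):
    print(name, '->', doc.splitlines()[0])
# prints 'x', 'y', 'x' with their first doc lines; the caller can then drop duplicates.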
def example_miller_set(example_crystal):
    """Generate an example miller set."""
    ms = miller.set(
        crystal_symmetry=example_crystal.get_crystal_symmetry(),
        indices=flex.miller_index([(1, 1, 1)] * 8 + [(2, 2, 2)]),
        anomalous_flag=False,
    )
    return ms
3,673
async def get(req):
    """
    Get a complete analysis document.

    """
    db = req.app["db"]

    analysis_id = req.match_info["analysis_id"]

    document = await db.analyses.find_one(analysis_id)

    if document is None:
        return not_found()

    sample = await db.samples.find_one(
        {"_id": document["sample"]["id"]},
        {"quality": False}
    )

    if not sample:
        return bad_request("Parent sample does not exist")

    read, _ = virtool.samples.utils.get_sample_rights(sample, req["client"])

    if not read:
        return insufficient_rights()

    await virtool.subtractions.db.attach_subtraction(db, document)

    if document["ready"]:
        document = await virtool.analyses.format.format_analysis(req.app, document)

    return json_response(virtool.utils.base_processor(document))
3,674
def get_cube_point_indexes(cube: xr.Dataset,
                           points: Union[xr.Dataset, pd.DataFrame, Mapping[str, Any]],
                           dim_name_mapping: Mapping[str, str] = None,
                           index_name_pattern: str = DEFAULT_INDEX_NAME_PATTERN,
                           index_dtype=np.float64,
                           cube_asserted: bool = False) -> xr.Dataset:
    """
    Get indexes of given point coordinates *points* into the given *dataset*.

    :param cube: The cube dataset.
    :param points: A mapping from column names to column data arrays, which must all
        have the same length.
    :param dim_name_mapping: A mapping from dimension names in *cube* to column names
        in *points*.
    :param index_name_pattern: A naming pattern for the computed indexes columns.
        Must include "{name}" which will be replaced by the dimension name.
    :param index_dtype: Numpy data type for the indexes. If it is a floating point type
        (default), then *indexes* will contain fractions, which may be used for
        interpolation. For out-of-range coordinates in *points*, indexes will be -1
        if *index_dtype* is an integer type, and NaN if *index_dtype* is a floating
        point type.
    :param cube_asserted: If False, *cube* will be verified, otherwise it is expected
        to be a valid cube.
    :return: A dataset containing the index columns.
    """
    if not cube_asserted:
        assert_cube(cube)

    dim_name_mapping = dim_name_mapping if dim_name_mapping is not None else {}
    dim_names = _get_cube_data_var_dims(cube)
    col_names = [dim_name_mapping.get(dim_name, dim_name) for dim_name in dim_names]

    _validate_points(points, col_names, param_name="points")

    indexes = []
    for dim_name, col_name in zip(dim_names, col_names):
        col = points[col_name]
        coord_indexes = get_dataset_indexes(cube, dim_name, col, index_dtype=index_dtype)
        indexes.append((index_name_pattern.format(name=dim_name),
                        xr.DataArray(coord_indexes, dims=[INDEX_DIM_NAME])))

    return xr.Dataset(dict(indexes))
3,675
def auto_add():
    """
    Automatically add URLs:
    1. Find all amis files.
    2. Update the records.
    3. Group the records by app name into a dict.
    4. Generate auto_urls.py for each app.
    :return:
    """
    amis_json_file_list = get_amis_files()
    cnt = update_rcd(amis_json_file_list)
    aml_app_dict = get_rcd_by_app_name()
    add_needed_auto_urls(aml_app_dict)
    add_urls_needed(aml_app_dict)
    return cnt
3,676
def _get_configs(cli_args: CLIArgs, project_root: Path) -> Configs:
    """
    Deal with extra configs for 3rd party tool.

    Parameters
    ----------
    cli_args
        Commandline arguments passed to nbqa
    project_root
        Root of repository, where .git / .hg / .nbqa.ini file is.

    Returns
    -------
    Configs
        Taken from CLI (if given), else from .nbqa.ini.
    """
    cli_config: Configs = Configs.parse_from_cli_args(cli_args)
    file_config: Optional[Configs] = config_parser.parse_config_from_file(
        cli_args, project_root
    )

    if file_config is not None:
        cli_config = cli_config.merge(file_config)

    return cli_config
3,677
def run_gx_test(dataset_path, output_dir, dist_types, ex_config, mp_args):
    """
    The start and end parameters together make an interval that contains the datasets
    to be included in this experiment.

    :param mp_args: the configuration of the multiprocess backend; go to
        https://docs.aws.amazon.com/emr/latest/ReleaseGuide/emr-spark-configure.html for the correct
        Spark configuration with AWS. You only need to worry about the configs that are exposed to
        you, that is: the number of workers, the max driver memory and the max result size.
    :param dataset_path: the path to the archive datasets
    :param dataset_soi: (soi: size of interest) an iterable of two integers for binning the experiment
    :param output_dir: the path to which the result csv's will be saved
    :param exclude_list: the list of dataset names to be excluded from the experiment archive
    :param dist_types: a list of strings, must contain at least one item. Items must be one of the
        following: eu, ch, ma
    :param ex_config: a dict that contains hyper-parameters for the experiment. They are:
        'num_sample': int, number of samples to consider in each dataset; set this to math.inf for a
            complete experiment
        'query_split': float, the fraction of the dataset to be taken as queries; use 0.2 for the time being
        '_lb_opt': bool, whether to turn off lower-bounding optimization for DTW; leave it False if not
            otherwise specified
        'radius': int, the length radius for Genex Query; leave it at 1 if not otherwise specified
        'use_spark': bool, whether to use the Spark backend; leave it True if not otherwise specified
        'loi_range': float, only consider sequences within a percentage length of the longest sequence;
            use 0.1 for the time being
        'st': float, hyper-parameter that determines the cluster boundary in genex.build; leave it as is
            if not otherwise specified
        'paa_seg': the n segments of PAA; use 3 as a heuristic approach
    """
    valid_dt = ['eu', 'ch', 'ma']
    try:
        assert os.path.isdir(dataset_path)
        assert os.path.isdir(output_dir)
        assert 0 < len(dist_types) <= 3
        assert np.all([x in valid_dt for x in dist_types])
    except AssertionError:
        raise Exception('Assertion failed in checking parameters')

    exp_set_list = [generate_ex_set_GENEX(dataset_path, output_dir, dt) for dt in dist_types]
    return [run_exp_set_GENEX(es, mp_args, **ex_config) for es in exp_set_list]
3,678
def main():
    """
    This is the main function used to call other functions

    :return: nothing
    """
    image_path = input("Enter path of image:")
    # call read function to read an image
    img = Read(image_path)
    # call sharp function to sharpen the image
    sharp_image = Sharp(img)
    # call show function to show original image and sharp image
    Show(img, sharp_image)
3,679
def _load_lib():
    """Load library in build/lib."""
    curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
    lib_path = os.path.join(curr_path, '../../build/lib/')
    path_to_so_file = os.path.join(lib_path, "libc_runtime_api.so")
    lib = ctypes.CDLL(path_to_so_file, ctypes.RTLD_GLOBAL)
    _check_functions(lib, DNNL_LIB)
    return lib
3,680
def robust_standardize(df: pd.DataFrame, excluded_colnames: list = None) -> pd.DataFrame:
    """
    Applies the RobustScaler from the module sklearn.preprocessing by
    removing the median and scaling the data according to the quantile
    range (IQR). This transformation is robust to outliers.

    Note: In case multiple dataframes are used (i.e., several partitions of
    the dataset in training and testing), make sure that all of them are
    passed to this method at once, as one single dataframe. Otherwise,
    the normalization will be carried out on local (as opposed to global)
    extrema, hence an unrepresentative IQR. This is a bad practice.

    :param df: The dataframe to be normalized.
    :param excluded_colnames: The names of non-numeric columns (e.g.,
        TimeStamp, ID etc.) that must be excluded before normalization takes
        place. They will be added back to the normalized data.

    :return: The same dataframe as input, with the label column unchanged,
        except that now the numerical values are transformed into a new range
        determined by the IQR.
    """
    from sklearn.preprocessing import RobustScaler

    excluded_colnames = excluded_colnames if excluded_colnames else []

    colnames_original_order = list(df)
    # Separate data (numeric) from those to be excluded (ids and class_labels)
    included_cnames = [colname for colname in list(df) if colname not in excluded_colnames]
    # Exclude all non-numeric columns
    df_numeric = df[included_cnames].select_dtypes(include=np.number)
    # set-difference between the original and numeric columns
    excluded_cnames = list(set(colnames_original_order) - set(list(df_numeric)))
    df_excluded = df[excluded_cnames]

    # prepare normalizer and normalize
    scaler = RobustScaler()
    res_ndarray = scaler.fit_transform(df_numeric)
    df_numeric = pd.DataFrame(res_ndarray, columns=list(df_numeric), dtype=float)

    # Reset the indices (so that they match)
    df_excluded.reset_index()
    df_numeric.reset_index()

    # Add the excluded columns back
    df_norm = df_excluded.join(df_numeric)

    # Restore the original order of columns
    df_norm = df_norm[colnames_original_order]

    return df_norm
3,681
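A small usage sketch for robust_standardize. It assumes pandas, NumPy and scikit-learn are installed; the column names and values below are made up for illustration:

import numpy as np
import pandas as pd

# Toy frame with an ID column that should survive scaling untouched and one
# extreme outlier in 'hr' to show the robustness of the IQR-based scaler.
df = pd.DataFrame({
    'subject_id': ['a', 'b', 'c', 'd', 'e'],
    'hr':   [61.0, 64.0, 66.0, 70.0, 180.0],
    'temp': [36.5, 36.7, 36.6, 36.9, 37.1],
})

scaled = robust_standardize(df, excluded_colnames=['subject_id'])
print(scaled)
# 'subject_id' is unchanged; 'hr' and 'temp' are centered on their medians and
# divided by their IQRs, so the single outlier no longer dominates the scale.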
def meanStdDev(valueList, scale):
    """Compute the mean and standard deviation of a *non-empty* list of numbers."""
    numElements = len(valueList)
    if numElements == 0:
        return (None, 0.0)
    mean = float(sum(valueList)) / numElements
    variance = 0
    for value in valueList:
        variance += math.pow(value - mean, 2)
    variance = variance / float(numElements)
    return (scale * mean, scale * math.sqrt(variance))
3,682
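A quick worked check for meanStdDev; it only needs the math module, which the function above already relies on:

import math  # required by meanStdDev

values = [2.0, 4.0, 4.0, 4.0, 5.0, 5.0, 7.0, 9.0]   # mean 5, population std-dev 2
print(meanStdDev(values, 1.0))    # -> (5.0, 2.0)
print(meanStdDev(values, 10.0))   # -> (50.0, 20.0); both statistics are multiplied by `scale`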
def audience_filter(digest, audience):
    """Check whether the current audience level should include that digest."""
    return get_split(
        digest,
        [
            {
                "key": "audience_{}".format(idx),
                "size": 1.0
            }
            for idx in range(0, 100)
        ]
    ) < audience
3,683
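A hedged sketch of the idea behind audience_filter. `get_split` is not defined in the snippet above; the stand-in below is only an assumption about its behaviour (deterministically hash the digest into one of the equally-sized buckets and return that bucket's index), written so the gradual-rollout property can be demonstrated:

import hashlib

def get_split(digest, buckets):
    # Hypothetical stand-in; the real get_split may use a different hash or weighting.
    total = sum(b["size"] for b in buckets)
    h = int(hashlib.sha256(str(digest).encode()).hexdigest(), 16)
    point = (h % 10_000) / 10_000 * total
    acc = 0.0
    for idx, bucket in enumerate(buckets):
        acc += bucket["size"]
        if point < acc:
            return idx
    return len(buckets) - 1

# A digest always maps to the same bucket, so raising `audience` from 10 to 50
# only adds digests; it never removes ones that were already included.
print(audience_filter("digest-123", 10))
print(audience_filter("digest-123", 50))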
def request_specific_data2num(batch_data):
    """
    input: next_batch_requestable request_specific_data[slot].
    Convert the data into a type that the TensorFlow model can process.
    :param batch_data: one batch of training data
    :return: data that can be fed directly into the request-specific tracker model
    """
    batchsize_request = len(batch_data)
    x_usr = np.zeros((batchsize_request, max_length, embedding_dim))
    x_usr_len = np.zeros((batchsize_request), dtype='int32')
    x_slot = np.zeros((batchsize_request, embedding_dim))
    for batch_id, data in enumerate(batch_data):
        for word_id, word in enumerate(data[1]):
            if word in vocab_dict:
                x_usr[batch_id, word_id, :] = embedding_table[word]
            else:
                x_usr[batch_id, word_id, :] = embedding_table['unk']
        x_usr_len[batch_id] = len(data[1])
        x_slot[batch_id, :] = embedding_table[data[2]]
    return x_usr, x_usr_len, x_slot
3,684
def model_pred(auto_model=True, model_date=None):  # Model prediction
    """
    :param auto_model: whether to automatically fetch the latest model, defaults to True
    :param model_date: if auto_model = False, manually specify the model of the given date
    :return:
    """
    if auto_model:
        model_pred_machine = pump_model_pred(station_name, obj_type)
    else:
        model_pred_machine = pump_model_pred(station_name, obj_type, model_date)

    ## Generate the sampling dates, one every 5 min
    sample_dates = pd.date_range(start=start_time, end=end_time, freq='5min')
    outputs = []
    ## Prediction result table
    result = pd.DataFrame(columns=res_columns)
    for date in sample_dates:
        end_dates = date
        start_dates = date - pd.Timedelta(minutes=175)  # previous three hours
        # Extract real-time data
        realtime_data = df[start_dates:end_dates]  # real-time data of the previous three hours
        # Extract data of the previous 7 days
        seven_end_date = date - pd.Timedelta(days=1) + pd.Timedelta(minutes=15)
        seven_start_date = seven_end_date - pd.Timedelta(days=7)
        history_data = df[seven_start_date:seven_end_date]
        history_data = history_data[1:]
        # print(history_data)

        # Output the prediction result
        next_val = model_pred_machine.pred(realtime_data, history_data)

        if need_order:
            ## Compute the order and the status signal
            if (target_name == 'hx_water_level') or (target_name == 'xfx_water_level'):
                current_val = realtime_data[target_name].iloc[-1]
                current_water_level = current_val
                bottom_pressure = realtime_data['bottom_pressure'].iloc[-1]
                # print(date)
                signal = order_Generator.signal_cal(current_val, next_val, current_water_level)
                res = pd.DataFrame([[current_val, next_val, signal]], columns=res_columns, index=[date])
                result = result.append(res)
                if order_Generator.order_cal(bottom_pressure, current_water_level):
                    order = order_Generator.order_cal(bottom_pressure, current_water_level)
                    print(order)

                    ## Test the order reject/accept silence mechanism
                    # Response = eval(input('Please enter the reply order (accept-2 reject-4):'))
                    # Response = 2
                    # if Response == 2:
                    #     order_Generator.silence_flag = True
                    # elif Response == 4:
                    #     print('Last element of the control signal sequence: {}'.format(order_Generator.control_signals[-1]))
                    #     order_Generator.control_signals[-1] = order_Generator.control_signals[-2]
                    #     print('Last element of the control signal sequence: {}'.format(order_Generator.control_signals[-1]))
                    #     order_Generator.signals[-1] = order_Generator.signals[-2]
                    #     print('Current pump station status: {}'.format(order_Generator.flag))
                    #     order_Generator.flag = order_Generator.signals[-1]
                    #     print('Current pump station status: {}'.format(order_Generator.flag))
                    #     order_Generator.silence_flag = True
                    # print('Silence mode status: {}'.format(order_Generator.silence_flag))

                    ## Write the order to disk
                    # with open('./record.txt', 'a') as f:
                    #     f.write(str(order) + '\n')

            if (target_name == 'hx_pressure') or (target_name == 'xfx_pressure'):
                current_val = realtime_data[target_name].iloc[-1]
                bottom_pressure = realtime_data['bottom_pressure'].iloc[-1]
                signal = order_Generator.signal_cal(current_val, next_val, bottom_pressure)
                # print(signal, date)
                res = pd.DataFrame([[current_val, next_val, signal]], columns=res_columns, index=[date])
                result = result.append(res)
                if order_Generator.order_cal(bottom_pressure):
                    order = order_Generator.order_cal(bottom_pressure)
                    print(order)

        outputs.append(next_val)

    result.to_csv('./res/{tn}_result.csv'.format(tn=target_name))  # Save the corresponding prediction result

    if plot:
        label_dict = {'hx_water_level': '华翔泵站液位', 'hx_pressure': '华翔泵站压力',
                      'xfx_water_level': '新凤溪泵站液位', 'xfx_pressure': '新凤溪泵站压力'}
        fig, ax = plt.subplots(3, 1, figsize=(16, 7), sharex=True)
        obs = df[target_name][start_time + pd.Timedelta(minutes=5):end_time + pd.Timedelta(minutes=5)].values
        idx = pd.date_range(start=start_time + pd.Timedelta(minutes=5),
                            end=end_time + pd.Timedelta(minutes=5), freq='5min')
        diff = np.diff(outputs, prepend=0)
        diff[0] = 0
        ax[0].plot(idx, obs, c='gray', label='obs')
        ax[0].plot(idx, outputs, c='blue', label='pred')
        ax[0].set_ylabel(label_dict[target_name], fontsize=12)
        ax[1].plot(idx, diff, c='blue', label='diff')
        ax[1].set_ylabel('差分值', fontsize=12)
        ax[2].plot(result.index, result['signal'])
        ax[2].set_ylabel('泵站信号', fontsize=12)
        ax[0].legend(loc='upper right', fontsize=12)
        ax[1].legend(loc='upper right', fontsize=12)
        plt.tight_layout()
        plt.savefig('./fig/{tn}_pred_plot'.format(tn=target_name))
3,685
def points_from_x0y0x1y1(xyxy):
    """
    Constructs a polygon representation from a rectangle described as a list [x0, y0, x1, y1]
    """
    [x0, y0, x1, y1] = xyxy
    return "%s,%s %s,%s %s,%s %s,%s" % (
        x0, y0,
        x1, y0,
        x1, y1,
        x0, y1
    )
3,686
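A quick check of the output format (plain Python, no extra dependencies):

print(points_from_x0y0x1y1([10, 20, 110, 80]))
# -> "10,20 110,20 110,80 10,80"
# i.e. the four rectangle corners as space-separated "x,y" pairs, the kind of
# string used in polygon "points" attributes (e.g. SVG or PAGE-XML).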
def get_columns(invoice_list, additional_table_columns):
    """return columns based on filters"""
    columns = [
        _("Invoice") + ":Link/Sales Invoice:120",
        _("Posting Date") + ":Date:80",
        _("Status") + "::80",
        _("Customer") + ":Link/Customer:120",
        _("Sales Person") + ":Link/Sales Person:100",
        _("AR Status") + "::75",
        _("Territory") + ":Link/Territory:100",
        _("SKU") + ":Link/Item:100",
        _("Qty") + ":Float:50",
        _("Price List") + ":Currency/currency:120",
        _("Discount") + ":Currency/currency:120",
        _("Net Price") + ":Currency/currency:120",
        _("Amount") + ":Currency/currency:120"
    ]

    columns = columns + [_("Outstanding Amount") + ":Currency/currency:120"]

    return columns
3,687
def one_norm(a):
    """
    Return the one-norm of the matrix.

    References:
    [0] https://www.mathworks.com/help/dsp/ref/matrix1norm.html

    Arguments:
    a :: ndarray(N x N) - The matrix to compute the one norm of.

    Returns:
    one_norm_a :: float - The one norm of a.
    """
    return anp.max(anp.sum(anp.abs(a), axis=0))
3,688
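A small worked check for one_norm. `anp` is not imported in the snippet above; it is presumably autograd.numpy, but plain NumPy gives the same result for this sanity check:

import numpy as anp   # stand-in for autograd.numpy; the API is the same for these calls

a = anp.array([[1.0, -2.0],
               [3.0,  4.0]])
# Column-wise sums of absolute values are |1|+|3| = 4 and |-2|+|4| = 6;
# the one-norm is the maximum of those, i.e. 6.
print(one_norm(a))   # -> 6.0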
def test_degree():
    """Tests for nodes of the given degree."""
    os.chdir(os.path.dirname(__file__) + '/data')
    proc = subprocess.Popen(['swc', 'find', 'pass_simple_branch.swc', '-g', '2'],
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE,
                            universal_newlines=True)
    stdout, stderr = proc.communicate()
    assert proc.returncode == 0
    assert stdout == '3 9 \n'
    assert stderr == ''
3,689
def remove_nan_inf(df, reindex=True):
    """
    Removes all rows that have NaN, inf or -inf as a value, and then
    optionally reindexes the dataframe.

    Parameters
    ----------
    df : pd.DataFrame
        Dataframe to remove NaNs and Infs from.
    reindex : bool, optional
        Reindex the dataframe so that there are no missing indices.

    Returns
    -------
    df : pd.DataFrame
        Dataframe with all the NaNs and Infs removed.
    """
    df = df.replace([np.inf, -np.inf], np.nan).dropna()
    if reindex is True:
        df = df.reset_index(drop=True)
    return df
3,690
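A small usage sketch for remove_nan_inf; it only needs pandas and NumPy, and the column values below are made up:

import numpy as np
import pandas as pd

df = pd.DataFrame({
    'a': [1.0, np.nan, 3.0, np.inf],
    'b': [10.0, 20.0, 30.0, 40.0],
})

clean = remove_nan_inf(df)                    # drops rows 1 and 3, index becomes 0..1
keep_idx = remove_nan_inf(df, reindex=False)  # same rows, original labels 0 and 2 kept
print(clean)
print(keep_idx.index.tolist())                # -> [0, 2]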
async def action(**kwargs):
    """
    [infinity]
    admin_channel_id =
    admin_id =
    recruiter_id =
    """
    message = kwargs['message']
    config = kwargs['config']
    client = kwargs['client']
    split_message = message.content.split()

    admin_channel_id = config.getint('infinity', 'admin_channel_id')
    admin_id = config.getint('infinity', 'admin_id')
    guild = message.channel.guild
    recruiter_id = config.getint('infinity', 'recruiter_id')

    if utils.find(lambda r: r.id == recruiter_id, message.author.roles):
        if len(split_message) >= 2 and split_message[0] == '!amos-intro':
            if len(message.mentions) > 0 and message.channel.name.startswith('ticket-'):
                target_member = message.mentions[0]
                if target_member in message.channel.members:
                    await target_member.send(
                        "Please start the validation process by sending the command `validate` in this PM.")

    if message.channel.id == admin_channel_id and message.author.id == admin_id:
        if len(split_message) > 0 and split_message[0] == '!pt':
            if len(split_message) == 2:
                if split_message[1] == 'filldb':
                    with open('pilots.csv') as csv_file:
                        csv_reader = csv.reader(csv_file, delimiter=',')
                        for row in csv_reader:
                            try:
                                discord_id = int(row[0])
                                member = guild.get_member(discord_id)
                                if member:
                                    character_names = [n for n in row[2:] if n != '']
                                    pilot_manager.add_pilot(discord_id, member.name, member.discriminator,
                                                            character_names=character_names)
                            except:
                                print("Unable to add member")
                                print(traceback.format_exc())
                    print("DB fill complete.")

                if split_message[1] == 'builddb':
                    pilot_manager.build_db()
                    await message.channel.send("DB build complete.")

                if split_message[1] == 'at-update':
                    pilot_manager.copy_to_airtable()
                    await message.channel.send("Airtable copy complete.")

                if split_message[1] == 'audit':
                    message_list = []
                    result_string = ""
                    missing_count = 0
                    # TODO: hardcoded channel ID
                    target_channel = guild.get_channel(722851595519262733)
                    for member in target_channel.members:
                        if not member.bot:
                            pilot = pilot_manager.get_pilot(member.id)
                            if not pilot:
                                missing_count += 1
                                if len(result_string) >= 1900:
                                    message_list.append(result_string)
                                    result_string = f"{member.name}#{member.discriminator} - {[r.name for r in member.roles]}\n"
                                else:
                                    result_string += f"{member.name}#{member.discriminator} - {[r.name for r in member.roles]}\n"
                    await message.channel.send(
                        f"Out of {len(guild.members)} members, {missing_count} either haven't provided a profile screenshot or provided one that couldn't be processed.")
                    for outmsg in message_list:
                        await message.channel.send(f"```\n{outmsg}```")

                if split_message[1] == 'help':
                    await message.channel.send(help_str)

            if len(split_message) == 3:
                if split_message[1] == 'create-pilots':
                    target_role_id = int(split_message[2])
                    target_role = guild.get_role(target_role_id)
                    for member in target_role.members:
                        pilot_manager.add_pilot(member.id, member.name, member.discriminator, character_names=[])

            if len(split_message) == 4:
                if split_message[1] == 'grant-role':
                    target_channel_id = int(split_message[3])
                    target_channel = message.guild.get_channel(target_channel_id)
                    target_role_id = int(split_message[2])
                    target_role = guild.get_role(target_role_id)
                    user_count = 0
                    for user in target_channel.members:
                        if not user.bot:
                            user_count += 1
                            await user.add_roles(target_role,
                                                 reason=f"[Amos BOT] - adding user via grant-member command from {target_role_id}.")
                    await message.channel.send(f"{user_count} users have been added to the role.")
3,691
def plot_insert_len(insert_len_filename, settings_filename, output_dir):
    """
    Plot insert length distribution.
    """
    if not os.path.isfile(settings_filename):
        print("Error: settings filename %s not found." % (settings_filename))
        sys.exit(1)

    plot_name = os.path.basename(insert_len_filename)

    sashimi_obj = Sashimi(plot_name, output_dir,
                          settings_filename=settings_filename)
    settings = sashimi_obj.settings
    num_bins = settings["insert_len_bins"]
    output_filename = sashimi_obj.output_filename
    sashimi_obj.setup_figure()
    s = plt.subplot(1, 1, 1)

    print("Plotting insert length distribution...")
    print(" - Distribution file: %s" % (insert_len_filename))
    print(" - Output plot: %s" % (output_filename))

    insert_dist, params = pe_utils.load_insert_len(insert_len_filename)

    mean, sdev, dispersion, num_pairs \
        = pe_utils.compute_insert_len_stats(insert_dist)
    print("min insert: %.1f" % (min(insert_dist)))
    print("max insert: %.1f" % (max(insert_dist)))
    plt.title("%s (%d read-pairs)" % (plot_name, num_pairs),
              fontsize=10)
    plt.hist(insert_dist, bins=num_bins, color='k',
             edgecolor="#ffffff", align='mid')
    axes_square(s)
    ymin, ymax = s.get_ylim()
    plt.text(0.05, 0.95, "$\mu$: %.1f\n$\sigma$: %.1f\n$d$: %.1f"
             % (round(mean, 2), round(sdev, 2), round(dispersion, 2)),
             horizontalalignment='left',
             verticalalignment='top',
             bbox=dict(edgecolor='k', facecolor="#ffffff", alpha=0.5),
             fontsize=10, transform=s.transAxes)
    plt.xlabel("Insert length (nt)")
    plt.ylabel("No. read pairs")
    sashimi_obj.save_plot()
3,692
def contingency_table(seg, gt, ignore_seg=[0], ignore_gt=[0], norm=True):
    """Return the contingency table for all regions in matched segmentations.

    Parameters
    ----------
    seg : np.ndarray, int type, arbitrary shape
        A candidate segmentation.
    gt : np.ndarray, int type, same shape as `seg`
        The ground truth segmentation.
    ignore_seg : list of int, optional
        Values to ignore in `seg`. Voxels in `seg` having a value in this list
        will not contribute to the contingency table. (default: [0])
    ignore_gt : list of int, optional
        Values to ignore in `gt`. Voxels in `gt` having a value in this list
        will not contribute to the contingency table. (default: [0])
    norm : bool, optional
        Whether to normalize the table so that it sums to 1.

    Returns
    -------
    cont : scipy.sparse.csc_matrix
        A contingency table. `cont[i, j]` will equal the number of voxels
        labeled `i` in `seg` and `j` in `gt`. (Or the proportion of such voxels
        if `norm=True`.)
    """
    segr = seg.ravel()
    gtr = gt.ravel()
    ij = np.vstack((segr, gtr))
    selector = np.ones(segr.shape, bool)  # np.bool was removed in recent NumPy versions
    data = np.ones(len(gtr))
    for i in ignore_seg:
        selector[segr == i] = 0
    for j in ignore_gt:
        selector[gtr == j] = 0
    ij = ij[:, selector]
    data = data[selector]
    cont = sparse.coo_matrix((data, ij)).tocsc()
    if norm:
        cont /= float(cont.sum())
    return cont
3,693
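A small usage sketch for contingency_table; it only needs NumPy and scipy.sparse, imported under the names the function body assumes (`np` and `sparse`):

import numpy as np
from scipy import sparse

seg = np.array([[1, 1, 2],
                [1, 2, 2]])
gt = np.array([[1, 1, 1],
               [2, 2, 2]])

cont = contingency_table(seg, gt, ignore_seg=[], ignore_gt=[], norm=False)
print(cont.toarray())
# cont[1, 1] == 2 (two voxels labeled 1 in seg and 1 in gt),
# cont[1, 2] == 1, cont[2, 1] == 1, cont[2, 2] == 2.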
def process_swissmodel(output_dir):
    """
    Identify swissmodel models in a directory and copy them to output_dir
    """
    proteins = [Path(i).stem for i in os.listdir('data/swissmodel') if i.endswith('.models')]
    for protein in proteins:
        config = pd.read_csv(f'data/swissmodel/{protein}.models', sep='\t',
                             dtype={'model': str})

        if not len(config.index) > 0:
            continue

        Path(f'{output_dir}/{protein}').mkdir(exist_ok=True)
        for model, template in zip(config.model, config.template):
            shutil.copyfile(f'data/foldx/{protein}_{model}/model.pdb',
                            f'{output_dir}/{protein}/{template}.pdb')
3,694
def LengthOfStayRangeAt24Hours(patient, enc):
    """Generate length of stay range labels at 24 hours after admission.

    Args:
      patient: patient proto, needed for label proto.
      enc: encounter, caller needs to do the proper cohort filterings.

    Yields:
      (label_name, value, label_time) tuple.
    """
    label_time = encounter.AtDuration(enc, 24)
    encounter_length_days = encounter.EncounterLengthDays(enc)
    label_val = None
    for idx in range(len(LOS_BOUNDARIES)):
        if encounter_length_days <= LOS_BOUNDARIES[idx]:
            if idx == 0:
                label_val = 'less_or_equal_%d' % LOS_BOUNDARIES[idx]
            else:
                label_val = '%d_%d' % (LOS_BOUNDARIES[idx - 1], LOS_BOUNDARIES[idx])
            break
    if label_val is None:
        label_val = 'above_%d' % LOS_BOUNDARIES[-1]
    yield ComposeLabel(patient, enc, LOS_RANGE_LABEL, label_val, label_time)
3,695
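A sketch of the binning logic above, pulled out into a standalone helper so it can be checked without the proto/encounter machinery. LOS_BOUNDARIES is not shown in the snippet; the values below are only an assumption for illustration:

LOS_BOUNDARIES = [1, 2, 3, 7, 14]   # hypothetical boundaries, in days

def los_range_label(encounter_length_days):
    # Same bucket selection as the loop in LengthOfStayRangeAt24Hours.
    for idx, boundary in enumerate(LOS_BOUNDARIES):
        if encounter_length_days <= boundary:
            if idx == 0:
                return 'less_or_equal_%d' % boundary
            return '%d_%d' % (LOS_BOUNDARIES[idx - 1], boundary)
    return 'above_%d' % LOS_BOUNDARIES[-1]

print(los_range_label(0.5))   # -> less_or_equal_1
print(los_range_label(5))     # -> 3_7
print(los_range_label(30))    # -> above_14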
def update_credit_status(blk_id, status):
    """Change a credit status"""
    try:
        database.execute("UPDATE credits SET status=%s WHERE blk_id = %s", (status, blk_id))
        log.message('Changed credit status on block_id %d status to %s' % (blk_id, status))
    except database.psycopg2.Error as e:
        raise Exception(e.pgerror) from None
    except Exception as e:
        log.error('Failed to change credit status on block_id %d status to %s' % (blk_id, status))
        log.error(e)
3,696
def get_basic_track_info(track):
    """
    Given a track object, return a dictionary of track name, artist name,
    album name, track uri, and track id.
    """
    # Remember that artist and album artist have different entries in the
    # spotify track object.
    name = track["name"]
    artist = track['artists'][0]['name']
    album = track['album']['name']
    uri = track["uri"]
    track_id = track['id']

    output = {"name": name, "artist": artist, "album": album, "uri": uri,
              "id": track_id}
    return output
3,697
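A quick usage sketch. The dict below is a trimmed-down, made-up stand-in for a Spotify track object, containing only the fields the function reads (a real API response has many more):

track = {
    "name": "Example Song",
    "artists": [{"name": "Example Artist"}],
    "album": {"name": "Example Album"},
    "uri": "spotify:track:example123",
    "id": "example123",
}
print(get_basic_track_info(track))
# -> {'name': 'Example Song', 'artist': 'Example Artist', 'album': 'Example Album',
#     'uri': 'spotify:track:example123', 'id': 'example123'}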
def demander_nombre(mini: int = None, maxi: int = None) -> int:
    """
    Ask the user for a number between mini and maxi.

    :param mini: the minimum
    :param maxi: the maximum
    :return: the number entered by the user
    """
    message = 'Veuillez rentrer un nombre:'
    if mini is not None and maxi is not None:
        message = f'Veuillez rentrer un nombre entre {mini} et {maxi}:'
    elif mini is not None and maxi is None:
        message = f'Veuillez rentrer un nombre supérieur à {mini}:'

    while True:
        nombre = input(message + '\n> ')

        # Make sure the user actually entered a number
        try:
            # Convert to a base-10 integer
            nombre = int(nombre)
        except ValueError:
            print('Valeur incorrecte.')
            continue

        # The input is now an integer. Check that it fits the min/max bounds
        if mini is not None and nombre < mini:
            print(f'Le nombre entré est trop petit. Il doit valoir au moins {mini}')
        elif maxi is not None and nombre > maxi:
            print(f'Le nombre entré est trop grand. Il doit valoir au maximum {maxi}')
        else:
            return nombre
3,698
def ordered_load(stream, merge_duplicate_keys=False):
    """
    Parse the first YAML document in a stream and produce the corresponding
    Python object, using OrderedDicts instead of dicts.

    If merge_duplicate_keys is True, merge the values of duplicate mapping
    keys into a list, as the uWSGI "dumb" YAML parser would do. Otherwise,
    following YAML 1.2 specification which says that "each key is unique in
    the association", raise a ConstructionError exception.
    """
    def construct_mapping(loader, node, deep=False):
        loader.flatten_mapping(node)
        mapping = OrderedDict()
        merged_duplicate = {}
        for key_node, value_node in node.value:
            key = loader.construct_object(key_node, deep=deep)
            value = loader.construct_object(value_node, deep=deep)
            if key in mapping:
                if not merge_duplicate_keys:
                    raise ConstructorError("while constructing a mapping",
                                           node.start_mark,
                                           "found duplicated key (%s)" % key,
                                           key_node.start_mark)
                log.debug("Merging values for duplicate key '%s' into a list", key)
                if merged_duplicate.get(key):
                    mapping[key].append(value)
                else:
                    mapping[key] = [mapping[key], value]
                    merged_duplicate[key] = True
            else:
                mapping[key] = value
        return mapping

    OrderedLoader.add_constructor(
        yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,
        construct_mapping)
    OrderedLoader.add_constructor('!include', OrderedLoader.include)
    return yaml.load(stream, OrderedLoader)
3,699
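A usage sketch for ordered_load, assuming `OrderedLoader` is the yaml loader subclass defined elsewhere in the same module (it is referenced but not shown in the snippet above), so the expected outputs are illustrative:

import io

doc = """
plugin: one
plugin: two
workers: 4
"""

# Default behaviour: duplicate keys are an error, per YAML 1.2.
try:
    ordered_load(io.StringIO(doc))
except Exception as err:
    print("rejected:", err.__class__.__name__)

# uWSGI-style merging: duplicates collapse into a list, insertion order preserved.
print(ordered_load(io.StringIO(doc), merge_duplicate_keys=True))
# -> OrderedDict([('plugin', ['one', 'two']), ('workers', 4)])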