content: string (lengths 22 to 815k)
id: int64 (0 to 4.91M)
def is_coroutine_generator(obj):
    """
    Returns whether the given `obj` is a coroutine generator created by an `async def` function, and can be used
    inside of an `async for` loop.

    Returns
    -------
    is_coroutine_generator : `bool`
    """
    if isinstance(obj, AsyncGeneratorType):
        code = obj.ag_code
    elif isinstance(obj, CoroutineType):
        code = obj.cr_code
    elif isinstance(obj, GeneratorType):
        code = obj.gi_code
    else:
        return False

    if code.co_flags & CO_ASYNC_GENERATOR:
        return True

    return False
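# A quick usage sketch (hypothetical, not part of the original module); it
# assumes the same names the function relies on, imported here explicitly.
from types import AsyncGeneratorType, CoroutineType, GeneratorType
from inspect import CO_ASYNC_GENERATOR

async def ticker():
    yield 1

print(is_coroutine_generator(ticker()))   # True
print(is_coroutine_generator(iter([])))   # False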
4,500
def get_scheme(scheme_id):
    """
    Retrieve the scheme dict identified by the supplied scheme ID.

    Returns:
        A scheme dict
    """
    for node in sd["nodes"]:
        if scheme_id == node["id"]:
            return node
4,501
def compute_vtable(cls: ClassIR) -> None:
    """Compute the vtable structure for a class."""
    if cls.vtable is not None:
        return

    if not cls.is_generated:
        cls.has_dict = any(x.inherits_python for x in cls.mro)

    for t in cls.mro[1:]:
        # Make sure all ancestors are processed first
        compute_vtable(t)
        # Merge attributes from traits into the class
        if not t.is_trait:
            continue
        for name, typ in t.attributes.items():
            if not cls.is_trait and not any(name in b.attributes for b in cls.base_mro):
                cls.attributes[name] = typ

    cls.vtable = {}
    if cls.base:
        assert cls.base.vtable is not None
        cls.vtable.update(cls.base.vtable)
        cls.vtable_entries = specialize_parent_vtable(cls, cls.base)

    # Include the vtable from the parent classes, but handle method overrides.
    entries = cls.vtable_entries

    # Traits need to have attributes in the vtable, since the
    # attributes can be at different places in different classes, but
    # regular classes can just directly get them.
    if cls.is_trait:
        # Traits also need to pull in vtable entries for non-trait
        # parent classes explicitly.
        for t in cls.mro:
            for attr in t.attributes:
                if attr in cls.vtable:
                    continue
                cls.vtable[attr] = len(entries)
                entries.append(VTableAttr(t, attr, is_setter=False))
                entries.append(VTableAttr(t, attr, is_setter=True))

    all_traits = [t for t in cls.mro if t.is_trait]
    for t in [cls] + cls.traits:
        for fn in itertools.chain(t.methods.values()):
            # TODO: don't generate a new entry when we overload without changing the type
            if fn == cls.get_method(fn.name):
                cls.vtable[fn.name] = len(entries)
                # If the class contains a glue method referring to itself, that is a
                # shadow glue method to support interpreted subclasses.
                shadow = cls.glue_methods.get((cls, fn.name))
                entries.append(VTableMethod(t, fn.name, fn, shadow))

    # Compute vtables for all of the traits that the class implements
    if not cls.is_trait:
        for trait in all_traits:
            compute_vtable(trait)
            cls.trait_vtables[trait] = specialize_parent_vtable(cls, trait)
4,502
def Axicon(phi, n1, x_shift, y_shift, Fin):
    """
    Fout = Axicon(phi, n1, x_shift, y_shift, Fin)

    :ref:`Propagates the field through an axicon. <Axicon>`

    Args::

        phi: top angle of the axicon in radians
        n1: refractive index of the axicon material
        x_shift, y_shift: shift from the center
        Fin: input field

    Returns::

        Fout: output field (N x N square array of complex numbers).

    Example:

    :ref:`Bessel beam with axicon <BesselBeam>`
    """
    Fout = Field.copy(Fin)
    k = 2*_np.pi/Fout.lam
    theta = _np.arcsin(n1*_np.cos(phi/2)+phi/2-_np.pi/2)
    Ktheta = k * theta
    yy, xx = Fout.mgrid_cartesian
    xx -= x_shift
    yy -= y_shift
    fi = -Ktheta*_np.sqrt(xx**2+yy**2)
    Fout.field *= _np.exp(1j*fi)
    return Fout
4,503
def addrAndNameToURI(addr, sname):
    """addrAndNameToURI(addr, sname) -> URI

    Create a valid corbaname URI from an address string and a stringified
    name"""

    # *** Note that this function does not properly check the address
    # string. It should raise InvalidAddress if the address looks
    # invalid.

    import urllib

    if type(addr) is not types.StringType or \
       type(sname) is not types.StringType:
        raise CORBA.BAD_PARAM(omniORB.BAD_PARAM_WrongPythonType,
                              COMPLETED_NO)

    if addr == "":
        raise CosNaming.NamingContextExt.InvalidAddress()

    if sname == "":
        return "corbaname:" + addr
    else:
        stringToName(sname)  # This might raise InvalidName
        return "corbaname:" + addr + "#" + urllib.quote(sname)
4,504
def blkdev_uname_to_taptype(uname):
    """Take a blkdev uname and return the blktap type."""
    return parse_uname(uname)[1]
4,505
def _write_ldif(lines, dn, keytab, admin_principal):
    """Issue an update to LDAP via ldapmodify in the form of lines of an
    LDIF file.

    :param lines: ldif file as a sequence of lines
    """
    cmd = 'kinit -t {keytab} {principal} ldapmodify'.format(
        keytab=keytab,
        principal=admin_principal,
    )
    child = pexpect.spawn(cmd, timeout=10)
    child.expect('SASL data security layer installed.')

    for line in lines:
        child.sendline(line)

    child.sendeof()
    child.expect('entry "{}"'.format(dn))
    child.expect(pexpect.EOF)
    output_after_adding = child.before.decode('utf8').strip()

    if 'Already exists (68)' in output_after_adding:
        raise ValueError('Tried to create duplicate entry.')
    elif 'No such object (32)' in output_after_adding:
        raise ValueError('Tried to modify nonexistent entry.')

    if output_after_adding != '':
        send_problem_report(
            dedent(
                '''\
                Unknown problem occurred when trying to write to LDAP; the
                code should be updated to handle this case.

                dn: {dn}
                keytab: {keytab}
                principal: {principal}

                Unexpected output:
                {output_after_adding}

                Lines passed to ldapadd:
                {lines}
                '''
            ).format(
                dn=dn,
                keytab=keytab,
                principal=admin_principal,
                output_after_adding=output_after_adding,
                lines='\n'.join(' ' + line for line in lines),
            )
        )
        raise ValueError('Unknown LDAP failure was encountered.')
4,506
def detection_layer(inputs, n_classes, anchors, img_size, data_format):
    """Creates Yolo final detection layer.

    Detects boxes with respect to anchors.

    Args:
        inputs: Tensor input.
        n_classes: Number of labels.
        anchors: A list of anchor sizes.
        img_size: The input size of the model.
        data_format: The input format.

    Returns:
        Tf value [box_centers, box_shapes, confidence, classes]
    """
    n_anchors = len(anchors)

    inputs = tf.keras.layers.Conv2D(filters=n_anchors * (5 + n_classes),
                                    kernel_size=1, strides=1, use_bias=True,
                                    data_format=data_format)(inputs)

    # Shape of each cell in image
    shape = inputs.get_shape().as_list()
    # Note: the original checked 'channel_first' here, which never matches;
    # the correct key is 'channels_first'.
    grid_shape = shape[2:4] if data_format == 'channels_first' else shape[1:3]
    if data_format == 'channels_first':
        # Put the channel's dim to the last position
        inputs = tf.transpose(inputs, [0, 2, 3, 1])
    inputs = tf.reshape(inputs,
                        [-1, n_anchors * grid_shape[0] * grid_shape[1],
                         5 + n_classes])

    # Strides = # of cells in an image
    strides = (img_size[0] // grid_shape[0], img_size[1] // grid_shape[1])

    box_centers, box_shapes, confidence, classes = \
        tf.split(inputs, [2, 2, 1, n_classes], axis=-1)

    x = tf.range(grid_shape[0], dtype=tf.float32)
    y = tf.range(grid_shape[1], dtype=tf.float32)
    x_offset, y_offset = tf.meshgrid(x, y)
    x_offset = tf.reshape(x_offset, (-1, 1))
    y_offset = tf.reshape(y_offset, (-1, 1))
    x_y_offset = tf.concat([x_offset, y_offset], axis=-1)
    x_y_offset = tf.tile(x_y_offset, [1, n_anchors])
    x_y_offset = tf.reshape(x_y_offset, [1, -1, 2])

    box_centers = tf.nn.sigmoid(box_centers)
    box_centers = (box_centers + x_y_offset) * strides

    anchors = tf.tile(anchors, [grid_shape[0] * grid_shape[1], 1])
    box_shapes = tf.exp(box_shapes) * tf.cast(anchors, dtype=tf.float32)

    confidence = tf.nn.sigmoid(confidence)
    classes = tf.nn.sigmoid(classes)

    inputs = tf.concat([box_centers, box_shapes, confidence, classes],
                       axis=-1)
    return inputs
4,507
def main():
    """
    Prepares an :obj:`~PyQt5.QtWidgets.QApplication` instance and starts the
    *GuiPy* application.
    """
    # Obtain application instance
    qapp = QW.QApplication.instance()

    # If qapp is None, create a new one
    if qapp is None:
        QW.QApplication.setAttribute(QC.Qt.AA_EnableHighDpiScaling)
        qapp = QW.QApplication([APP_NAME])

    # Set name of application
    qapp.setApplicationName(APP_NAME)

    # Make sure that the application quits when last window closes
    qapp.lastWindowClosed.connect(qapp.quit, QC.Qt.QueuedConnection)

    # Initialize main window and draw (show) it
    main_window = MainWindow()
    main_window.show()
    main_window.raise_()
    main_window.activateWindow()

    # Replace KeyboardInterrupt error by system's default handler
    signal.signal(signal.SIGINT, signal.SIG_DFL)

    # Set MPL's backend to 'Agg'
    plt.switch_backend('Agg')

    # Start application
    qapp.exec_()
4,508
def create_position_tear_sheet(returns, positions,
                               show_and_plot_top_pos=2, hide_positions=False,
                               sector_mappings=None, transactions=None,
                               estimate_intraday='infer', return_fig=False):
    """
    Generate a number of plots for analyzing a strategy's positions and holdings.

    - Plots: gross leverage, exposures, top positions, and holdings.
    - Will also print the top positions held.

    Parameters
    ----------
    returns : pd.Series
        Daily returns of the strategy, noncumulative.
         - See full explanation in create_full_tear_sheet.
    positions : pd.DataFrame
        Daily net position values.
         - See full explanation in create_full_tear_sheet.
    show_and_plot_top_pos : int, optional
        By default, this is 2, and both prints and plots the top 10 positions.
        If this is 0, it will only plot; if 1, it will only print.
    hide_positions : bool, optional
        If True, will not output any symbol names.
        Overrides show_and_plot_top_pos to 0 to suppress text output.
    sector_mappings : dict or pd.Series, optional
        Security identifier to sector mapping.
        Security ids as keys, sectors as values.
    transactions : pd.DataFrame, optional
        Prices and amounts of executed trades. One row per trade.
         - See full explanation in create_full_tear_sheet.
    estimate_intraday: boolean or str, optional
        Approximate returns for intraday strategies.
        See description in create_full_tear_sheet.
    return_fig : boolean, optional
        If True, returns the figure that was plotted on.
    """
    positions = utils.check_intraday(estimate_intraday, returns,
                                     positions, transactions)

    if hide_positions:
        show_and_plot_top_pos = 0
    vertical_sections = 7 if sector_mappings is not None else 6

    fig = plt.figure(figsize=(14, vertical_sections * 6))
    gs = gridspec.GridSpec(vertical_sections, 3, wspace=0.5, hspace=0.5)
    ax_exposures = plt.subplot(gs[0, :])
    ax_top_positions = plt.subplot(gs[1, :], sharex=ax_exposures)
    ax_max_median_pos = plt.subplot(gs[2, :], sharex=ax_exposures)
    ax_holdings = plt.subplot(gs[3, :], sharex=ax_exposures)
    ax_long_short_holdings = plt.subplot(gs[4, :])
    ax_gross_leverage = plt.subplot(gs[5, :], sharex=ax_exposures)

    positions_alloc = pos.get_percent_alloc(positions)

    plotting.plot_exposures(returns, positions, ax=ax_exposures)

    plotting.show_and_plot_top_positions(
        returns,
        positions_alloc,
        show_and_plot=show_and_plot_top_pos,
        hide_positions=hide_positions,
        ax=ax_top_positions)

    plotting.plot_max_median_position_concentration(positions,
                                                    ax=ax_max_median_pos)

    plotting.plot_holdings(returns, positions_alloc, ax=ax_holdings)

    plotting.plot_long_short_holdings(returns, positions_alloc,
                                      ax=ax_long_short_holdings)

    plotting.plot_gross_leverage(returns, positions,
                                 ax=ax_gross_leverage)

    if sector_mappings is not None:
        sector_exposures = pos.get_sector_exposures(positions, sector_mappings)
        if len(sector_exposures.columns) > 1:
            sector_alloc = pos.get_percent_alloc(sector_exposures)
            sector_alloc = sector_alloc.drop('cash', axis='columns')
            ax_sector_alloc = plt.subplot(gs[6, :], sharex=ax_exposures)
            plotting.plot_sector_allocations(returns, sector_alloc,
                                             ax=ax_sector_alloc)

    for ax in fig.axes:
        plt.setp(ax.get_xticklabels(), visible=True)

    if return_fig:
        return fig
4,509
def statement_view(request, statement_id=None):
    """Send a CSV version of the statement with the given ``statement_id`` to
    the user's browser.
    """
    statement = get_object_or_404(
        Statement, pk=statement_id, account__user=request.user)
    response = HttpResponse(mimetype='text/csv')
    filename = "%s (%s).csv" % (statement.title,
                                statement.from_date.strftime('%B %Y'))
    response['Content-Disposition'] = 'attachment; filename=%s' % (filename,)
    writer = csv.writer(response)
    headings = ["Tag pool name", "Tag name", "Message direction", "Total cost"]
    writer.writerow(headings)
    line_item_list = statement.lineitem_set.all()
    for line_item in line_item_list:
        writer.writerow([
            line_item.tag_pool_name, line_item.tag_name,
            line_item.message_direction, line_item.total_cost])
    return response
4,510
def get_proton_gamma(energy):
    """Returns relativistic gamma for protons."""
    return energy / PROTON_MASS
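# Worked example (assumes `energy` and PROTON_MASS share units, e.g. MeV with
# PROTON_MASS = 938.272): a 7 TeV LHC proton gives
# gamma = 7_000_000 / 938.272 ≈ 7460.5.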
4,511
def test_login_and_authenticated(server):
    """
    Whitebox test. Check the internal client variables are being set on login.
    """
    add_mock_user(server, 'u1', 'password')

    # Before Login
    with server.application.test_request_context('/'):
        user = server.application.restful_auth.storage.get_client_by_username('u1')
        assert user.is_authenticated == False
        assert user.token == None

    response = server.post(
        '/user/login',
        headers={"Authorization": "Basic " + valid_credentials}
    )
    # If the assertion below fails, also check the test_basic_auth_login test.

    # After Login
    with server.application.test_request_context('/'):
        user = server.application.restful_auth.storage.get_client_by_username('u1')
        assert user.is_authenticated == True
        assert user.token is not None

    return
4,512
def generate_heatmap_cube(locus_generator, overlap_df, snp_df, peak_df, heatmap_folder,
                          color_scheme, specific_locus=None, show_plots=True):
    """
    This function is under maintenance, as in I ran out of time to finish it.
    Nevertheless, the goal of this function is to show a 3D version of the overlap
    memberships. Ideally, this will be an active feature once the data for more
    tissues becomes available.

    :param locus_generator: the generator for each membership dataframe
    :param overlap_df: the bedtools output in dataframe form
    :param snp_df: the imputed snps in dataframe form
    :param peak_df: the peak desc file in a dataframe
    :param heatmap_folder: the folder that will contain all the generated heatmaps
    :param color_scheme: the color scheme list for the heatmap
    :param specific_locus: if the user wants to focus on a particular locus, input a string
    :param show_plots: a boolean to interactively show each generated heatmap; usually safe to keep false
    :return: none, but a 3D image will be generated
    """
    values = [list(x.rgb) + [1] for x in color_scheme]  # list comprehension so it stays indexable on Python 3
    values[3][3] = 0.2
    tissues = peak_df["Tissue"].unique()

    if specific_locus:
        fig = plt.figure()
        ax = fig.gca(projection='3d')
        for i, data in enumerate(locus_generator(overlap_df, snp_df, peak_df,
                                                 specific_locus=specific_locus, keep_rsq=False)):
            tissue, current_locus, locus_df = data
            display_df = locus_df
            if i == 0:
                critical_columns = list(peak_df[(peak_df["Tissue"] == tissue) &
                                                (peak_df["Priority"] == 1)]["Label"].unique())
                display_df = display_df.T.sort_values(critical_columns, ascending=True)
                display_df = display_df[(display_df[critical_columns] != 0).any(axis=1)]
                display_df = display_df.T
                selected_snps = list(display_df.columns)
                mark_length = len(peak_df["Label"].unique())
                snp_length = display_df.shape[1]
                X = np.arange(0, mark_length, 1)
                Y = np.arange(0, len(tissues), 1)
                Z = np.arange(0, snp_length, 1)
                X, Y, Z = np.meshgrid(X, Y, Z)
            else:
                display_df = locus_df[selected_snps]
            surface_map = np.zeros((mark_length, snp_length, 4))
            display_df = display_df + 3
            for z, snp in enumerate(display_df.columns):
                for x, mark in enumerate(display_df.index):
                    location = display_df.loc[mark][snp]
                    surface_map[x][z] = values[location]
            surf = ax.plot_surface(X[i], Y[i], Z[i], facecolors=surface_map)
        ax.set_xticklabels(list(display_df.index))
        ax.set_yticks(np.arange(0, 8, 1))
        ax.set_yticklabels(tissues)
        ax.set_zticks(np.arange(0, display_df.shape[1], 1))
        ax.set_zticklabels(list(display_df.columns))
        # The original referenced an undefined `locus` here; use specific_locus.
        plt.savefig("{}/{}_3D.png".format(heatmap_folder, specific_locus))
    else:
        for locus in snp_df["Locus"].unique():
            fig = plt.figure()
            fig.subplots_adjust(bottom=0.2)
            ax = fig.gca(projection='3d')
            # TODO create a function that will return a list of dataframes and the important snps
            for i, data in enumerate(locus_generator(overlap_df, snp_df, peak_df,
                                                     specific_locus=locus, keep_rsq=False)):
                tissue, current_locus, locus_df = data
                display_df = locus_df
                if i == 0:
                    critical_columns = list(peak_df[(peak_df["Tissue"] == tissue) &
                                                    (peak_df["Priority"] == 1)]["Label"].unique())
                    display_df = display_df.T.sort_values(critical_columns, ascending=True)
                    display_df = display_df[(display_df[critical_columns] != 0).any(axis=1)]
                    display_df = display_df.T
                    selected_snps = list(display_df.columns)
                    mark_length = len(peak_df["Label"].unique())
                    snp_length = display_df.shape[1]
                    X = np.arange(0, mark_length, 1)
                    Y = np.arange(0, len(tissues), 1)
                    Z = np.arange(0, snp_length, 1)
                    X, Y, Z = np.meshgrid(X, Y, Z)
                else:
                    display_df = locus_df[selected_snps]
                    if display_df.empty:
                        break
                surface_map = np.zeros((mark_length, snp_length, 4))
                print(surface_map.shape)
                display_df = display_df + 3
                for z, snp in enumerate(display_df.columns):
                    for x, mark in enumerate(display_df.index):
                        location = display_df.loc[mark][snp]
                        surface_map[x][z] = values[location]
                print(Y.shape)
                surf = ax.plot_surface(X[i], Y[i], Z[i], facecolors=surface_map)
            print(display_df.shape)
            if display_df.empty or display_df.shape[0] < 2:
                continue
            ax.set_xticklabels(list(display_df.index))
            ax.set_yticks(np.arange(0, 8, 1))
            ax.set_yticklabels(tissues)
            ax.set_zticks(np.arange(0, display_df.shape[1], 1))
            ax.set_zticklabels(list(display_df.columns))
            ax.set_title("Locus: {}".format(locus))
            plt.savefig("{}/{}_3D.png".format(heatmap_folder, locus))
            plt.clf()
            plt.close()
4,513
def demandNameItem(listDb, phrase2, mot):
    """
    Put the database names of all items into a string to insert in the database.

    listDb: list with the database name of each item
    phrase2: string accumulating the database names
    mot: database name of an item

    Return a string with the database names of all items separated by ','.
    """
    for i in range(len(listDb)):
        mot = str(listDb[i])
        phrase2 += mot
        if not i == len(listDb)-1:
            phrase2 += ','
    return phrase2
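# Example call (phrase2 and mot are just accumulator/scratch arguments):
print(demandNameItem(['name', 'price', 'qty'], '', ''))  # 'name,price,qty'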
4,514
def escape_yaml(data: str) -> str:
    """
    Jinja2 filter for escaping strings in YAML.

    Escapes `$`.
    """
    return data.replace("$", "$$")
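# Example; registering it on a Jinja2 environment is hypothetical but typical:
#   env.filters["escape_yaml"] = escape_yaml
print(escape_yaml("cost: $5"))  # 'cost: $$5'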
4,515
def test_empty_result():
    """Test the client when POST to tefas returns empty list"""
    Crawler._do_post = MagicMock(return_value=[])
    crawler = Crawler()
    crawler.fetch(start="2020-11-20")
4,516
def stokes_linear(theta):
    """Stokes vector for light polarized at angle theta from the horizontal plane."""
    if np.isscalar(theta):
        return np.array([1, np.cos(2*theta), np.sin(2*theta), 0])

    theta = np.asarray(theta)
    return np.array([np.ones_like(theta),
                     np.cos(2*theta),
                     np.sin(2*theta),
                     np.zeros_like(theta)]).T
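# Sanity check (assumes numpy as np): horizontal and 45-degree polarization.
print(stokes_linear(0))          # [1. 1. 0. 0.]
print(stokes_linear(np.pi / 4))  # ~[1. 0. 1. 0.]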
4,517
def validate_item_field(attr_value, attr_form):
    """
    :param attr_value: the item's attribute value
    :param attr_form: the validation rules for the item category's attribute
    :return:
    """
    if not isinstance(attr_form, dict):
        return -1, {"error": "attr_form is not a dict."}
    required = attr_form.get('required')
    if required == 'false':
        return 0, {"msg": "success"}
    field = attr_form.get('field')
    if not field:
        return -1, {"error": "field missed."}
    if field == "string":
        if not isinstance(attr_value, str):
            return -1, {"error": "attr_value is not a string."}
        if len(attr_value) < int(attr_form["min_length"]) or len(attr_value) > int(attr_form["max_length"]):
            return -1, {"error": "invalid string length."}
        if attr_form.get('valid_rule') == "none":
            return 0, {"msg": "success"}
        elif attr_form.get('valid_rule') == "IPaddress":
            pattern = re.compile(r'\d+\.\d+\.\d+\.\d+')  # IP address matching could be improved
        elif attr_form.get('valid_rule') == "email":
            pattern = re.compile(r'^(\w)+(\.\w+)*@(\w)+((\.\w+)+)$')
        elif attr_form.get('valid_rule') == "phone":
            pattern = re.compile(r'^\d{11}$')
        else:
            return -1, {"error": "invalid valid_rule."}
        match = pattern.match(attr_value)
        if not match:
            return -1, {"error": "did not match rule: %s" % attr_form.get('valid_rule')}
    elif field == "text":
        if not isinstance(attr_value, str):
            return -1, {"error": "attr_value is not a string."}
        if len(attr_value) < int(attr_form["min_length"]) or len(attr_value) > int(attr_form["max_length"]):
            return -1, {"error": "invalid string length."}
    elif field == "select":
        if not isinstance(attr_value, str):
            return -1, {"error": "attr_value is not a string."}
        if attr_value not in attr_form["choice"][1:-1].split("|"):
            return -1, {"error": "invalid choice."}
    elif field == "multiple_select":
        if not isinstance(attr_value, str):
            return -1, {"error": "attr_value is not a string."}
        for each in attr_value.split("|"):
            if each not in attr_form["choice"][1:-1].split("|"):
                return -1, {"error": "invalid choice."}
    elif field == "integer":
        if not isinstance(attr_value, int):
            return -1, {"error": "attr_value is not an integer."}
        if attr_value < int(attr_form["min_value"]) or attr_value > int(attr_form["max_value"]):
            return -1, {"error": "invalid integer value."}
    elif field == "datetime":
        if not isinstance(attr_value, str):
            return -1, {"error": "attr_value is not a string."}
        try:
            date_object = datetime.datetime.strptime(attr_value, '%Y%m%d%H%M%S')
        except ValueError:
            return -1, {"error": "time data '%s' does not match format" % attr_value}
    elif field == "reference":
        if not isinstance(attr_value, str):
            return -1, {"error": "attr_value is not a string."}
        item_obj = Item.objects(id=attr_value)
        if not item_obj:
            return -1, {"error": "unknown item."}
        if item_obj.category.id != attr_form["reference"]:
            return -1, {"error": "wrong category."}
    return 0, {"msg": "success"}
4,518
def display_instances(image, boxes, masks, ids, names, scores):
    """
    take the image and results and apply the mask, box, and Label
    """
    n_instances = boxes.shape[0]
    colors = random_colors(n_instances)

    if not n_instances:
        print('NO INSTANCES TO DISPLAY')
    else:
        assert boxes.shape[0] == masks.shape[-1] == ids.shape[0]

    for i, color in enumerate(colors):
        # we want the colours to only be in one color: SIFR orange ff5722
        # color = (255, 87, 34)
        if not np.any(boxes[i]):
            continue

        y1, x1, y2, x2 = boxes[i]
        label = names[ids[i]]
        score = scores[i] if scores is not None else None
        caption = '{} {:.2f}'.format(label, score) if score else label
        mask = masks[:, :, i]

        image = apply_mask(image, mask, color)
        image = cv2.rectangle(image, (x1, y1), (x2, y2), color, 2)
        image = cv2.putText(
            image, caption, (x1, y1), cv2.FONT_HERSHEY_COMPLEX, 0.7, color, 2
        )

    return image
4,519
def run():
    """
    Run game.
    """
    words = load_words(WORDFILE)
    wrangler = provided.WordWrangler(words, remove_duplicates,
                                     intersect, merge_sort,
                                     gen_all_strings)
    provided.run_game(wrangler)
4,520
def read_squad_examples(input_file, is_training):
    """Read a SQuAD json file into a list of SquadExample."""
    with tf.io.gfile.GFile(input_file, "r") as reader:
        input_data = json.load(reader)["data"]

    examples = []
    for entry in input_data:
        for paragraph in entry["paragraphs"]:
            paragraph_text = paragraph["context"]

            for qa in paragraph["qas"]:
                qas_id = qa["id"]
                question_text = qa["question"]
                start_position = None
                orig_answer_text = None
                is_impossible = False

                if is_training:
                    if FLAGS.train_version == "v2":
                        is_impossible = qa["is_impossible"]
                    if (len(qa["answers"]) != 1) and (not is_impossible):
                        raise ValueError(
                            "For training, each question should have exactly 1 answer.")
                    if not is_impossible:
                        answer = qa["answers"][0]
                        orig_answer_text = answer["text"]
                        start_position = answer["answer_start"]
                    else:
                        start_position = -1
                        orig_answer_text = ""

                example = SquadExample(
                    qas_id=qas_id,
                    question_text=question_text,
                    paragraph_text=paragraph_text,
                    orig_answer_text=orig_answer_text,
                    start_position=start_position,
                    is_impossible=is_impossible)
                examples.append(example)

    return examples
4,521
def load_gamma_telescope():
    """https://archive.ics.uci.edu/ml/datasets/MAGIC+Gamma+Telescope"""
    url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/magic/magic04.data'
    filepath = os.path.join(get_data_dir(), "magic04.data")
    maybe_download(filepath, url)
    # The raw file has no header row, so read with header=None before naming columns
    # (otherwise the first data row would be consumed as the header).
    data = pd.read_csv(filepath, header=None)
    data.columns = ['fLength', 'fWidth', 'fSize', 'fConc', 'fConc1',
                    'fAsym', 'fM3Long', 'fM3Trans', 'fAlpha', 'fDist', 'class']
    X = data.drop(['class'], axis=1)
    y = data['class']
    return X, y
4,522
def qarange(start, end, step):
    """
    Convert the cyclic measurement and control data into the required array.

    :param start:
    :param end:
    :param step:
    :return: np.array
    """
    if Decimal(str(end)) - Decimal(str(start)) < Decimal(str(step)) or step == 0:
        return [start]
    start_decimal = str(start)[::-1].find('.')
    step_decimal = str(step)[::-1].find('.')
    data_decimal = max([step_decimal, start_decimal])
    if data_decimal == -1:
        data_decimal = 0
    data_number = int((Decimal(str(end)) - Decimal(str(start))) / Decimal(str(step)))
    end_data = round(start + data_number * step, data_decimal)
    data_np = np.linspace(start, end_data, data_number + 1)
    data_list = [round(data, data_decimal) for data in data_np]
    return data_list
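# Example: unlike a float np.arange, the endpoint is included and values are
# rounded to the inferred decimal precision:
print(qarange(0, 1, 0.1))
# [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]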
4,523
def checkFull(t, e, maxval, minval):
    """Check the maximum and minimum bounds for the type given by e"""
    checkType(t, e, maxval, 1)
    checkType(t, e, minval, -1)
4,524
def get_default_render_layer():
    """Returns the default render layer

    :return:
    """
    return pm.ls(type='renderLayer')[0].defaultRenderLayer()
4,525
def translation(shift):
    """Translation Matrix for 2D"""
    return np.asarray(planar.Affine.translation(shift)).reshape(3, 3)
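# Usage sketch (requires the `planar` package): the result is the 3x3
# homogeneous matrix encoding a shift by the given (x, y) vector.
m = translation((2.0, 3.0))
print(m.shape)  # (3, 3)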
4,526
def pad(adjacency_matrices, size):
    """Pads adjacency matrices to the desired size

    This will pad the adjacency matrices to the specified size, appending
    zeros as required. The output adjacency matrices will all be of size
    'size' x 'size'.

    Args:
        adjacency_matrices: The input list of adjacency matrices.
        size: The desired dimension of the output matrices.

    Returns:
        The resulting list of adjacency matrices.
    """
    padding = size - adjacency_matrices.shape[1]
    return np.pad(adjacency_matrices,
                  [(0, 0), (0, padding), (0, padding)],
                  mode='constant')
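# Example (assumes numpy as np): pad a batch of one 2x2 matrix up to 4x4.
batch = np.ones((1, 2, 2))
print(pad(batch, 4).shape)  # (1, 4, 4)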
4,527
def test_merge_configuration():
    """ Test merging two simple configurations works as expected. """
    one = {"alpha": [0, 1], BernoulliNB: {"fit_prior": [True, False]}}
    two = {"alpha": [0, 2], GaussianNB: {"fit_prior": [True, False]}}

    expected_merged = {
        "alpha": [0, 1, 2],
        GaussianNB: {"fit_prior": [True, False]},
        BernoulliNB: {"fit_prior": [True, False]},
    }

    actual_merged = merge_configurations(one, two)
    assert expected_merged == actual_merged
4,528
def test_launch_with_longer_multiword_domain_name() -> None:
    """This test is important because we want to make it convenient for users
    to launch nodes with an arbitrary number of words."""
    # COMMAND: "hagrid launch United Nations"
    args: List[str] = ["United", "States", "of", "America"]

    verb = cli.get_launch_verb()
    grammar = cli.parse_grammar(args=tuple(args), verb=verb)
    verb.load_grammar(grammar=grammar)
    cmd = cli.create_launch_cmd(verb=verb, kwargs={}, ignore_docker_version_check=True)
    print(cmd)

    # check that it's a domain by default
    assert "NODE_TYPE=domain" in cmd
    # check that the node has a name
    assert "DOMAIN_NAME='united_states_of_america'" in cmd
    # check that tail is on by default
    assert " -d " not in cmd
4,529
def mapping_activities_from_log(log, name_of_activity):
    """
    Returns a mapping from each unique activity in the log to an index.

    :param name_of_activity:
    :param log:
    :return: mapping
    """
    mapping_activities = dict()
    unique_activities = unique_activities_from_log(log, name_of_activity)
    for index, activity in enumerate(unique_activities):
        mapping_activities[activity] = index
    return mapping_activities
4,530
def bubble_sort(nums) -> List[int]:
    """Sorts numbers in ascending order.

    It is:
    - quadratic time
    - constant space
    - stable
    - iterative
    - mutative
    - internal algorithm
    """
    # Repeatedly sweep the list, swapping adjacent out-of-order pairs;
    # each pass bubbles the largest remaining value to the end.
    for i in range(len(nums) - 1):
        swapped = False
        for j in range(len(nums) - 1 - i):
            if nums[j] > nums[j + 1]:
                nums[j], nums[j + 1] = nums[j + 1], nums[j]
                swapped = True
        if not swapped:  # already sorted; stop early
            break
    return nums
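# Example run of the (mutative) sort implemented above:
print(bubble_sort([5, 2, 4, 1]))  # [1, 2, 4, 5]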
4,531
def pad(x, paddings, axes=None):
    """
    Pads a tensor with zeroes along each of its dimensions.
    TODO: clean up slice / unslice used here

    Arguments:
        x: the tensor to be padded
        paddings: the length of the padding along each dimension.
            should be an array with the same length as x.axes.
            Each element of the array should be either an integer,
            in which case the padding will be symmetrical, or a tuple
            of the form (before, after)
        axes: the axes to be given to the padded tensor.
            If unsupplied, we create new axes of the correct lengths.

    Returns:
        TensorOp: symbolic expression for the padded tensor
    """
    if len(x.axes) != len(paddings):
        raise ValueError((
            "pad's paddings has length {pad} which needs to be the same "
            "as the number of axes in x ({x})"
        ).format(
            pad=len(paddings),
            x=len(x.axes),
        ))

    def pad_to_tuple(pad):
        if isinstance(pad, int):
            pad = (pad, pad)
        return pad

    def to_slice(pad):
        s = (pad[0], -pad[1])
        s = tuple(None if p == 0 else p for p in s)
        return slice(s[0], s[1], 1)

    paddings = tuple(pad_to_tuple(pad) for pad in paddings)
    if axes is None:
        axes = make_axes(
            make_axis(length=axis.length + pad[0] + pad[1], name=axis.name)
            if pad != (0, 0) else axis
            for axis, pad in zip(x.axes, paddings)
        )
    slices = tuple(to_slice(p) for p in paddings)
    return _unslice(x, slices, axes)
4,532
def labels_to_1hotmatrix(labels, dtype=int):
    """
    Maps a restricted growth string to a one-hot flag matrix. The input and
    the output are equivalent representations of a partition of a set of n
    elements.

    labels: restricted growth string: n-vector with entries in {0,...,n-1}.
        The first entry is 0. Other entries cannot exceed any previous entry
        by more than 1.
    dtype: optional, default=int. Element data type for returned matrix.
        bool or float can also be used.

    Returns (m,n) matrix, with 0/1 entries, where m is the number of blocks
    in the partition and n is the number of elements in the partitioned set.
    Columns are one-hot. If return_matrix[i,j], then element j is in block i.
    """
    m = 1 + labels.max()
    B = np.arange(m).reshape(-1, 1) == labels
    return B.astype(dtype, copy=False)
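# Example (assumes numpy as np): [0, 1, 0, 2] encodes blocks {0,2}, {1}, {3}.
print(labels_to_1hotmatrix(np.array([0, 1, 0, 2])))
# [[1 0 1 0]
#  [0 1 0 0]
#  [0 0 0 1]]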
4,533
def model_counter_increment(instance):
    """Word Count map function."""
    instance.counter += 1
    instance.save()
    yield (instance.pk, "{0}".format(instance.counter))
4,534
def gen_cues_adp(model_type, thresh, batch_size, size, cues_dir, set_name, is_verbose):
    """Generate weak segmentation cues for ADP (helper function)

    Parameters
    ----------
    model_type : str
        The name of the model to use for generating cues (i.e. 'M1', 'M2', 'M3', 'M4',
        'M5', 'M6', 'M7', 'X1.7', 'M7', 'M7bg', 'VGG16', or 'VGG16bg')
    thresh : float
        Confidence value for thresholding activation maps [0-1]
    batch_size : int
        The batch size (>0)
    size : int
        The length of the resized input image
    cues_dir : str
        The directory to save the cues to
    set_name : str
        The name of the evaluation set (i.e. 'tuning' or 'segtest')
    is_verbose : bool, optional
        Whether to activate message verbosity
    """
    ac = ADPCues('ADP_' + model_type, batch_size, size, model_dir=MODEL_ROOT)
    seed_size = 41

    # Load network and thresholds
    cues_dirs = {}
    for htt_class in ['morph', 'func']:
        cues_dirs[htt_class] = os.path.join(cues_dir, htt_class)
        makedir_if_nexist([cues_dirs[htt_class]])
    ac.build_model()

    # Load Grad-CAM weights
    if not os.path.exists('data'):
        os.makedirs('data')
    if is_verbose:
        print('\tGetting Grad-CAM weights for given network')
    alpha = ac.get_grad_cam_weights(np.zeros((1, size, size, 3)))

    # Read in image names
    img_names = ac.get_img_names(set_name)

    # Process images in batches
    n_batches = len(img_names) // batch_size + 1
    for iter_batch in range(n_batches):
        start_time = time.time()
        if is_verbose:
            print('\tBatch #%d of %d' % (iter_batch + 1, n_batches))
        start_idx = iter_batch * batch_size
        end_idx = min(start_idx + batch_size - 1, len(img_names) - 1)
        cur_batch_sz = end_idx - start_idx + 1
        img_batch_norm, img_batch = ac.read_batch(img_names[start_idx:end_idx + 1])

        # Determine passing classes
        predicted_scores = ac.model.predict(img_batch_norm)
        is_pass_threshold = np.greater_equal(predicted_scores, ac.thresholds)

        # Generate Grad-CAM
        H = ac.grad_cam(alpha, img_batch_norm, is_pass_threshold)
        H = np.transpose(H, (0, 3, 1, 2))
        H = resize_stack(H, (seed_size, seed_size))

        # Split Grad-CAM into {morph, func}
        H_split = {}
        H_split['morph'], H_split['func'] = ac.split_by_httclass(H)
        is_pass = {}
        is_pass['morph'], is_pass['func'] = ac.split_by_httclass(is_pass_threshold)

        # Modify Grad-CAM for each HTT type separately
        seeds = {}
        for htt_class in ['morph', 'func']:
            seeds[htt_class] = np.zeros((cur_batch_sz, len(ac.classes['valid_' + htt_class]),
                                         seed_size, seed_size))
            seeds[htt_class][:, ac.classinds[htt_class + '2valid']] = H[:, ac.classinds['all2' + htt_class]]
            class_inds = [ac.classinds_arr[htt_class + '2valid'][is_pass[htt_class][i]]
                          for i in range(cur_batch_sz)]

            # Modify heatmaps
            if htt_class == 'morph':
                seeds[htt_class] = ac.modify_by_htt(seeds[htt_class], img_batch,
                                                    ac.classes['valid_' + htt_class])
            elif htt_class == 'func':
                class_inds = [np.append(1, x) for x in class_inds]
                adipose_inds = [i for i, x in enumerate(ac.classes['morph'])
                                if x in ['A.W', 'A.B', 'A.M']]
                gradcam_adipose = seeds['morph'][:, adipose_inds]
                seeds[htt_class] = ac.modify_by_htt(seeds[htt_class], img_batch,
                                                    ac.classes['valid_' + htt_class],
                                                    gradcam_adipose=gradcam_adipose)

            # Update localization cues
            ac.update_cues(seeds[htt_class], class_inds, htt_class,
                           list(range(start_idx, end_idx + 1)), thresh)
        elapsed_time = time.time() - start_time
        if is_verbose:
            print('\t\tElapsed time: %s seconds (%s seconds/image)' %
                  (elapsed_time, elapsed_time / cur_batch_sz))

    # Save localization cues
    if is_verbose:
        print('\tSaving localization cues')
    pickle.dump(ac.cues['morph'], open(os.path.join(cues_dirs['morph'], 'localization_cues.pickle'), 'wb'))
    pickle.dump(ac.cues['func'], open(os.path.join(cues_dirs['func'], 'localization_cues.pickle'), 'wb'))
4,535
def recall(logits, target, topk=[1, 5, 10], typeN=8):
    """Compute top K recalls of a batch.

    Args:
        logits (B x max_entities, B x max_entities x max_rois):
        target (B x max_entities, B x max_entities x max_rois):
        topk: top k recalls to compute

    Returns:
        N: number of entities in the batch
        TPs: topk true positives in the batch
        bound: max number of groundable entities
    """
    logits, target, N, types = select(logits, target)
    topk = [topk] if isinstance(topk, int) else sorted(topk)
    TPs = [0] * len(topk)
    bound = target.max(-1, False)[0].sum().item()  # at least one detected
    typeTPs = th.zeros(typeN, device=types.device)
    typeN = th.zeros_like(typeTPs)
    #print("target entity type count: ", types.shape, types.sum(dim=0), target.shape)

    if max(topk) == 1:
        top1 = th.argmax(logits, dim=1)
        one_hots = th.zeros_like(target)
        one_hots.scatter_(1, top1.view(-1, 1), 1)
        TPs = (one_hots * target).sum().item()
        hits = (one_hots * target).sum(dim=1) >= 1
        typeTPs += types[hits].sum(dim=0)
        typeN += types.sum(dim=0)
    else:
        logits = th.sort(logits, 1, descending=True)[1]
        for i, k in enumerate(topk):
            one_hots = th.zeros_like(target)
            one_hots.scatter_(1, logits[:, :k], 1)
            # hit if at least one matched
            TPs[i] = ((one_hots * target).sum(dim=1) >= 1).float().sum().item()
            if i == 0:
                hits = (one_hots * target).sum(dim=1) >= 1
                typeTPs += types[hits].sum(dim=0)
                typeN += types.sum(dim=0)

    #print(TPs, N)
    #print(typeTPs)
    #print(typeN)
    return N, th.Tensor(TPs + [bound]), (typeTPs.cpu(), typeN.cpu())
4,536
def n(request) -> int:
    """A test fixture enumerating values for `n`."""
    return request.param
4,537
def create_color_lot(x_tick_labels, y_tick_labels, data, xlabel, ylabel, colorlabel, title):
    """
    Generate 2D plot for the given data and labels
    """
    fig, ax = plot.subplots()
    heatmap = ax.pcolor(data)
    colorbar = plot.colorbar(heatmap)
    colorbar.set_label(colorlabel, rotation=90)

    ax.set_xticks(arange(len(x_tick_labels)) + 0.5, minor=False)
    ax.set_yticks(arange(len(y_tick_labels)) + 0.5, minor=False)
    ax.set_xticklabels(x_tick_labels, minor=False)
    ax.set_yticklabels(y_tick_labels, minor=False)
    ax.set_xlabel(xlabel)
    ax.set_ylabel(ylabel)

    plot.title(title)
    plot.show()
    # plot.savefig(title+".png")
    # fig.clf()
    # fig.clear()
    # ax.cla()
    # ax.clear()
    # plot.cla()
    # plot.clf()
    # plot.close()
4,538
def decode(ciphered_text):
    """
    Decodes the ciphered text into human readable text.

    Returns a string.
    """
    text = ciphered_text.replace(' ', '')  # We remove all whitespaces
    return ''.join([decode_map[x] if decode_map.get(x) else x for x in text])
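# Hypothetical example: with decode_map = {'1': 'a', '2': 'b'} in scope,
# decode('1 2 x') returns 'abx' (spaces removed, unmapped characters kept).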
4,539
def _collect_fields(resource):
    """Collect fields from the JSON.

    :param resource: ResourceBase or CompositeField instance.
    :returns: generator of tuples (key, field)
    """
    for attr in dir(resource.__class__):
        field = getattr(resource.__class__, attr)
        if isinstance(field, Field):
            yield (attr, field)
4,540
def parse(s):
    """
    Date parsing tool. Changing the formats here causes a change in the
    whole application.
    """
    formats = ['%Y-%m-%dT%H:%M:%S.%fZ', '%d/%m/%Y %H:%M:%S', '%d/%m/%Y%H:%M:%S',
               '%d/%m/%Y', '%H:%M:%S']
    d = None
    for format in formats:
        try:
            d = datetime.strptime(s, format)
            break
        except ValueError:
            pass
    return d
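# Example (assumes `from datetime import datetime`, as the function does):
print(parse('2020-12-25T10:30:00.000Z'))  # 2020-12-25 10:30:00
print(parse('25/12/2020 10:30:00'))       # 2020-12-25 10:30:00
print(parse('not a date'))                # None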
4,541
def openstack(request):
    """Context processor necessary for OpenStack Dashboard functionality.

    The following variables are added to the request context:

    ``authorized_tenants``
        A list of tenant objects which the current user has access to.

    ``regions``
        A dictionary containing information about region support, the current
        region, and available regions.
    """
    context = {}

    # Auth/Keystone context
    context.setdefault('authorized_tenants', [])
    current_dash = request.horizon['dashboard']
    needs_tenants = getattr(current_dash, 'supports_tenants', False)
    if request.user.is_authenticated() and needs_tenants:
        context['authorized_tenants'] = request.user.authorized_tenants

    # Region context/support
    available_regions = getattr(settings, 'AVAILABLE_REGIONS', [])
    regions = {'support': len(available_regions) > 1,
               'current': {'endpoint': request.session.get('region_endpoint'),
                           'name': request.session.get('region_name')},
               'available': [{'endpoint': region[0], 'name': region[1]} for
                             region in available_regions]}
    context['regions'] = regions
    context['cluster'] = {'title': "Cluster"}
    return context
4,542
def get_align_mismatch_pairs(align, ref_genome_dict=None) -> list:
    """input a pysam AlignedSegment object

    Args:
        align (pysam.AlignedSegment object): pysam.AlignedSegment object
        ref_genome_dict (dict, optional): returned dict from
            load_reference_fasta_as_dict(). Defaults to None.

    Returns:
        list/None: it returns mismatch_pair_list, just like
            [ref_index, align_index, ref_base, align_base];
            the "ref_index" uses the same coordinate system as the UCSC
            genome browser. When NM == 0, it returns None.
    """
    # No mismatch
    try:
        if align.get_tag("NM") == 0:
            return None
    except:
        return None

    MD_tag_state = align.has_tag("MD")

    if MD_tag_state:
        # parse softclip, insertion and deletion
        info_index_list = []
        accu_index = 0

        for cigar_type, cigar_len in align.cigartuples:
            if cigar_type == 1 or cigar_type == 4:
                info_index_list.append((accu_index + 1, cigar_len))
            elif cigar_type == 2:
                info_index_list.append((accu_index + 1, -cigar_len))
            accu_index += cigar_len

        # parse MD tag
        mismatch_pair_list = []
        cur_base = ""
        cur_index = 0
        bases = align.get_tag("MD")

        i = 0
        while i < len(bases):
            base = bases[i]

            if base.isdigit():
                cur_base += base
                i += 1
            else:
                cur_index += int(cur_base)
                cur_base = ""

                if base == "^":
                    i += 1
                    del_str = ""
                    # Bounds check must come first to avoid an IndexError when a
                    # deletion run ends the MD string.
                    while i < len(bases) and bases[i].isalpha():
                        del_str += bases[i]
                        i += 1
                    cur_index += len(del_str)
                    del_str = ""
                elif base.isalpha():
                    cur_index += 1
                    ref_base = base
                    i += 1

                    # add into list
                    fix_index = cur_index + back_indel_shift(info_index_list, cur_index)

                    if fix_index < len(align.query_sequence):
                        mismatch_pair_list.append(
                            [
                                cur_index + align.reference_start,
                                cur_index - 1,
                                ref_base,
                                align.query_sequence[fix_index - 1],
                            ]
                        )
                    else:
                        return None

        return mismatch_pair_list
    else:
        mismatch_pair_list = []
        for align_idx, ref_idx in align.get_aligned_pairs():
            if (align_idx is not None) and (ref_idx is not None):
                align_base = align.query_sequence[align_idx]
                ref_base = ref_genome_dict[align.reference_name][ref_idx]
                if align_base != ref_base:
                    mismatch_pair_list.append(
                        [ref_idx + 1, align_idx, ref_base, align_base]
                    )
        return mismatch_pair_list
4,543
def get_int(name, default=None):
    """
    :type name: str
    :type default: int
    :rtype: int
    """
    return int(get_parameter(name, default))
4,544
def display_instances(image, boxes, masks, keypoints, class_id=1, class_name='person',
                      scores=None, title="",
                      figsize=(16, 16), ax=None,
                      show_mask=True, show_bbox=True, show_keypoint=True,
                      colors=None, captions=None):
    """
    boxes: [num_instance, (y1, x1, y2, x2, class_id)] in image coordinates.
    masks: [height, width, num_instances]
    class_ids: 1 for person
    class_name: class name of the dataset
    scores: (optional) confidence scores for each box
    title: (optional) Figure title
    show_mask, show_bbox: To show masks and bounding boxes or not
    figsize: (optional) the size of the image
    colors: (optional) An array or colors to use with each object
    captions: (optional) A list of strings to use as captions for each object
    """
    # Number of instances
    N = boxes.shape[0]
    if not N:
        print("\n*** No instances to display *** \n")
    else:
        assert boxes.shape[0] == masks.shape[0]

    # If no axis is passed, create one and automatically call show()
    auto_show = False
    if not ax:
        _, ax = plt.subplots(1, figsize=figsize)
        auto_show = True

    # Generate random colors
    colors = colors or random_colors(N)

    # Show area outside image boundaries.
    height, width = image.shape[:2]
    ax.set_ylim(height + 10, -10)
    ax.set_xlim(-10, width + 10)
    ax.axis('off')
    ax.set_title(title)

    masked_image = image.astype(np.uint32).copy()
    for i in range(N):
        color = colors[i]

        # Bounding box
        if not np.any(boxes[i]):
            # Skip this instance. Has no bbox. Likely lost in image cropping.
            continue
        y1, x1, y2, x2 = boxes[i]
        if show_bbox:
            p = patches.Rectangle((x1, y1), x2 - x1, y2 - y1, linewidth=2,
                                  alpha=0.7, linestyle="dashed",
                                  edgecolor=color, facecolor='none')
            ax.add_patch(p)

        # Label
        if not captions:
            class_id = class_id
            score = scores[i] if scores is not None else None
            label = class_name
            caption = "{} {:.3f}".format(label, score) if score else label
        else:
            caption = captions[i]
        ax.text(x1, y1 + 8, caption,
                color='w', size=11, backgroundcolor="none")

        # Mask
        mask = masks[i, :, :]
        keypoint = keypoints[i]
        if show_mask:
            masked_image = apply_mask(masked_image, mask, color)
            # Mask Polygon
            # Pad to ensure proper polygons for masks that touch image edges.
            padded_mask = np.zeros(
                (mask.shape[0] + 2, mask.shape[1] + 2), dtype=np.uint8)
            padded_mask[1:-1, 1:-1] = mask
            contours = find_contours(padded_mask, 0.5)
            for verts in contours:
                # Subtract the padding and flip (y, x) to (x, y)
                verts = np.fliplr(verts) - 1
                p = Polygon(verts, facecolor="none", edgecolor=color)
                ax.add_patch(p)
        if show_keypoint:
            masked_image = apply_keypoint(masked_image, keypoint)

    ax.imshow(masked_image.astype(np.uint8))
    if auto_show:
        plt.show()
4,545
def run_test(series: pd.Series, randtest_name, **kwargs) -> TestResult:
    """Run a statistical test on RNG output

    Parameters
    ----------
    series : ``Series``
        Output of the RNG being tested
    randtest_name : ``str``
        Name of statistical test
    **kwargs
        Keyword arguments to pass to statistical test

    Returns
    -------
    result : ``TestResult`` or ``MultiTestResult``
        Data containers of the test's result(s).

    Raises
    ------
    TestNotFoundError
        If `randtest_name` does not match any available statistical tests
    TestError
        Errors raised when running ``randtest_name``
    """
    try:
        func = getattr(_randtests, randtest_name)
    except AttributeError as e:
        raise TestNotFoundError() from e

    with Progress(*columns, console=console, transient=True) as progress:
        abbrv = f_randtest_abbreviations[randtest_name]
        task = progress.add_task(abbrv)

        try:
            result = func(series, ctx=(progress, task), **kwargs)

            color = "yellow" if result.failures else "green"
            print_randtest_name(randtest_name, color)
            console.print(result)

            return result
        except TestError as e:
            print_randtest_name(randtest_name, "red")
            print_error(e)
            raise e
4,546
def radians(x):
    """
    Convert degrees to radians
    """
    if isinstance(x, UncertainFunction):
        mcpts = np.radians(x._mcpts)
        return UncertainFunction(mcpts)
    else:
        return np.radians(x)
4,547
def screen_poisson_objective(pp_image, hp_w, hp_b, data):
    """Objective function."""
    return (stencil_residual(pp_image, hp_w, hp_b, data) ** 2).sum()
4,548
def remove_auto_shutdown_jobs():
    """ clears the job scheduler off auto shutdown jobs """
    jobs = schedule.get_jobs("auto_off")
    if jobs:
        #print(jobs)
        schedule.clear("auto_off")
        #print("auto shutdown cancelled")
4,549
async def test_no_port(event_loop, test_log, mock_server_command):
    """Check that a process that didn't bind any endpoints is handled correctly."""
    # Case 1: process didn't bind an endpoint in time.
    PORT_TIMEOUT = 0.5
    process = polled_process.PolledProcess(
        mock_server_command(socket_count=0),
        None
    )
    with pytest.raises(polled_process.ProcessStartTimeoutError):
        await process.start(port_timeout=PORT_TIMEOUT)

    # Case 2: process exits without binding an endpoint.
    process = polled_process.PolledProcess(
        mock_server_command(socket_count=0, exit_delay=0),
        None
    )
    with pytest.raises(polled_process.ProcessExitedError):
        await process.start()
4,550
def learn(request, artwork_genre=None):
    """
    Returns an art genre.
    """
    _genre = get_object_or_404(Genre, slug=artwork_genre)
    return render_to_response('t_learn.html',
                              {'genre': _genre},
                              context_instance=RequestContext(request))
4,551
def plot_corr_matrix(corr_data, labels, save_out=False, fig_info=FigInfo()):
    """Plot correlation data.

    Parameters
    ----------
    corr_data : 2d array
        Matrix of correlation data to plot.
    labels : list of str
        Labels for the rows & columns of `corr_data`.
    save_out : boolean, optional (default = False)
        Whether to save out a copy of the figure.
    """
    # Plot Settings
    t_fs = fig_info.t_fs    # Title Font Size
    ti_fs = fig_info.ti_fs  # Axis ticks font size

    # Set colormap to use
    cmap = plt.get_cmap('seismic')

    # Create the plot
    #im = plt.matshow(corr_data, vmin=-1, vmax=1, cmap=cmap, interpolation='none')
    im = plt.matshow(corr_data, vmin=-1, vmax=1, cmap=cmap, interpolation='nearest')
    # Notes on using nearest here:
    #   https://github.com/matplotlib/matplotlib/issues/2972/

    # Add title
    if fig_info.add_title:
        plt.title('Osc Band Correlations', {'fontsize': t_fs, 'fontweight': 'bold'}, y=1.15)

    # Set tick labels
    nums = list(range(len(labels)))
    plt.xticks(nums, labels, rotation=45, ha='left')
    plt.yticks(nums, labels)

    # Set ticks font size
    plt.tick_params(axis='both', which='major', labelsize=ti_fs)

    # Add a colorbar - add padding to offset further from plot
    plt.colorbar(pad=0.15)

    save_figure(save_out, '106-CorrMat', fig_info)
4,552
def plot_column(path: str, column: str, outpath: str = ""):
    """Plot a single column and save to file."""
    df = to_df(path)
    col_df = df.set_index(["name", "datetime"])[column].unstack("name")
    ax = col_df.plot(grid=True)
    ax.set_xlabel("Time")
    ax.set_ylabel(LABEL_MAP[column])
    if outpath:
        ax.get_figure().savefig(outpath, bbox_inches="tight")
    return ax
4,553
def get_loader():
    """Returns torch.utils.data.DataLoader for custom Pypipes dataset."""
    data_loader = None
    return data_loader
4,554
def attention_decoder_cell_fn(decoder_rnn_cell, memories, attention_type,
                              decoder_type, decoder_num_units, decoder_dropout,
                              mode, batch_size, beam_width=1,
                              decoder_initial_state=None, reuse=False):
    """Create a decoder cell with attention. It takes a decoder cell as argument.

    Args:
        - memories: (encoder_outputs, encoder_state, input_length) tuple
        - attention_type: "luong", "bahdanau"
        - mode: "train", "test"
    """
    if mode == "train":
        beam_width = 1
    with tf.variable_scope('attention_decoder_cell', reuse=reuse):
        attention_mechanisms = []
        attention_layers = []
        for idx, (encoder_outputs, encoder_state, input_length) in enumerate(memories):
            # Tile batch for beam search; if beam_width == 1, then nothing happens
            encoder_outputs, input_length, encoder_state, beam_batch_size = \
                prepare_beam_search_decoder_inputs(beam_width, encoder_outputs,
                                                   input_length, encoder_state,
                                                   batch_size)

            # Temporal attention along time step
            if attention_type == "luong":
                attention_mechanism = tf.contrib.seq2seq.LuongAttention(
                    decoder_num_units, memory=encoder_outputs,
                    memory_sequence_length=input_length)
            elif attention_type == "bahdanau":
                attention_mechanism = tf.contrib.seq2seq.BahdanauAttention(
                    decoder_num_units, memory=encoder_outputs,
                    memory_sequence_length=input_length)
            attention_layer = tf.layers.Dense(decoder_num_units,
                                              name="{}th_attention".format(idx),
                                              use_bias=False, dtype=tf.float32,
                                              _reuse=reuse)
            attention_mechanisms.append(attention_mechanism)
            attention_layers.append(attention_layer)

        #decoder_rnn_cell = single_rnn_cell(decoder_type, decoder_num_units,
        #                                   decoder_dropout, mode, reuse=reuse)
        attention_rnn_cell = tf.contrib.seq2seq.AttentionWrapper(
            decoder_rnn_cell,
            attention_mechanisms,
            attention_layer=attention_layers,
            initial_cell_state=None,
            name="AttentionWrapper")

        # Set decoder initial state
        initial_state = attention_rnn_cell.zero_state(dtype=tf.float32,
                                                      batch_size=beam_batch_size)
        if decoder_initial_state:
            decoder_initial_state = tf.contrib.seq2seq.tile_batch(
                decoder_initial_state, multiplier=beam_width)
            initial_state = initial_state.clone(cell_state=decoder_initial_state)
        return attention_rnn_cell, initial_state
4,555
def scan_recurse(path: PrettyPath, _resolved: ResolvedDotfilesJson) -> ActionResult:
    """Scan action: Recurse into this directory for more dotfiles."""
4,556
def test_json_to_spark_schema_invalid(invalid_schema, missed_key):
    """json_to_spark_schema should raise KeyError for missing key."""
    # Arrange & Act
    with pytest.raises(KeyError) as key_error:
        json_to_spark_schema(create_json_schema(invalid_schema))

    # Assert
    assert "Missing key: '{0}'. Valid format: {1}".format(
        missed_key, "All schema columns must have a name, type and nullable key"
    ) in str(key_error)
4,557
async def post_user_income(user: str, income: Income):
    """
    This function creates a new income in the DB. It checks whether the user
    exists and returns a message in case no user exists. Otherwise, it creates
    a new document in the DB with the user's new income.

    user: user's uuid.
    income: Income (check pydantic model) parameters to save in DB.
    """
    user_bool = user_exist(user)
    if user_bool:
        income_created = await create_new_income(income)
        if income_created:
            return {"Message": "successful", "payload": "Income created successfully."}
        else:
            return {"Message": "error", "payload": "There was an error creating Income."}
    else:
        return {"Message": "error", "payload": "User not found."}
4,558
def process_axfr_response(origin, nameserver, owner, overwrite=False):
    """
    origin: string domain name
    nameserver: IP of the DNS server
    """
    origin = Name((origin.rstrip('.') + '.').split('.'))
    axfr_query = dns.query.xfr(nameserver, origin, timeout=5,
                               relativize=False, lifetime=10)
    try:
        zone = dns.zone.from_xfr(axfr_query, relativize=False)
        if not str(zone.origin).rstrip('.'):
            raise UnknownOrigin
        process_and_import_zone_data(zone, owner, overwrite)
    except NoSOA:
        raise Exception('The zone has no SOA RR at its origin')
    except NoNS:
        raise Exception('The zone has no NS RRset at its origin')
    except UnknownOrigin:
        raise Exception('The zone\'s origin is unknown')
    except BadZone:
        raise Exception('The zone is malformed')
    except DNSException as e:
        if not str(e):
            raise Exception('Transfer Failed')
        raise Exception(str(e))
4,559
def bind_rng_to_host_device(rng: jnp.ndarray,
                            axis_name: str,
                            bind_to: Optional[str] = None) -> jnp.ndarray:
    """Binds a rng to the host/device we are on.

    Must be called from within a pmapped function. Note that when binding to
    "device", we also bind the rng to hosts, as we fold_in the rng with
    axis_index which is unique for devices across all hosts.

    Args:
        rng: A jax.random.PRNGKey.
        axis_name: The axis of the devices we are binding rng across.
        bind_to: Must be one of the 'host' or 'device'. None means no binding.

    Returns:
        jax.random.PRNGKey specialized to host/device.
    """
    if bind_to is None:
        return rng
    if bind_to == 'host':
        return jax.random.fold_in(rng, jax.process_index())
    elif bind_to == 'device':
        return jax.random.fold_in(rng, jax.lax.axis_index(axis_name))
    else:
        raise ValueError(
            "`bind_to` should be one of the `[None, 'host', 'device']`")
4,560
async def snobpic(ctx):
    """Command for a personalised profile picture: input a color (RGB or HEX),
    output a reply with the profile picture."""
    await snobBot.snobpic(ctx)
4,561
def get_child_ids(pid, models, myself=True, ids: set = None) -> set:
    """
    Get the set of child IDs of a models model.

    :param pid: ID of the models model class
    :param models: the models model object
    :param myself: whether to include pid itself
    :param ids: set of all IDs (defaults to None)
    :return: ids (set of all IDs)
    """
    if ids is None:
        ids = set()
    queryset = models.objects.filter(pid=pid)
    for instance in queryset:
        ids.add(instance.id)
        get_child_ids(instance.id, models, myself, ids)
    if myself:
        ids.add(pid)
    return ids
4,562
def get_tenant_id(khoros_object, community_details=None):
    """This function retrieves the tenant ID of the environment.

    .. versionadded:: 2.1.0

    :param khoros_object: The core :py:class:`khoros.Khoros` object
    :type khoros_object: class[khoros.Khoros]
    :param community_details: Dictionary containing community details (optional)
    :type community_details: dict, None
    :returns: The tenant ID in string format
    :raises: :py:exc:`khoros.errors.exceptions.GETRequestError`
    """
    return get_community_field(khoros_object, 'id', community_details)
4,563
def pretty_print_output(critter, matches, contigs, pd, mc, mp):
    """Write some nice output to stdout"""
    unique_matches = sum([1 for node, uce in matches.iteritems()])
    out = "\t {0}: {1} ({2:.2f}%) uniques of {3} contigs, {4} dupe probe matches, " + \
          "{5} UCE probes matching multiple contigs, {6} contigs matching multiple UCE probes"
    print out.format(
        critter,
        unique_matches,
        float(unique_matches) / contigs * 100,
        contigs,
        len(pd),
        len(mp),
        len(mc)
    )
4,564
def obfuscatable_variable(tokens, index):
    """
    Given a list of *tokens* and an *index* (representing the current position),
    returns the token string if it is a variable name that can be safely
    obfuscated.

    Returns '__skipline__' if the rest of the tokens on this line should be skipped.
    Returns '__skipnext__' if the next token should be skipped.

    If *ignore_length* is ``True``, even variables that are already a single
    character will be obfuscated (typically only used with the ``--nonlatin``
    option).
    """
    tok = tokens[index]
    token_type = tok[0]
    token_string = tok[1]
    line = tok[4]
    if token_type != tokenize.NAME:
        return None  # Skip this token
    if token_string in analyze.storageLocation_scope_words:
        return None  # Skip this token
    if token_string == "pragma" or token_string == "import":
        return "__skipline__"
    if token_string == '_':
        return None
    # skipnext = ['(', ')', '{', '}', ';']
    # if token_string in skipnext:
    #     return '__skipnext__'
    if index > 0:
        prev_tok = tokens[index-1]
    else:  # Pretend it's a newline (for simplicity)
        prev_tok = (54, '\n', (1, 1), (1, 2), '#\n')
    prev_tok_type = prev_tok[0]
    prev_tok_string = prev_tok[1]
    if index > 1:
        pre_prev_tok = tokens[index-2]
    else:  # Pretend it's a newline (for simplicity)
        pre_prev_tok = (54, '\n', (1, 1), (1, 2), '#\n')
    pre_prev_tok_type = pre_prev_tok[0]
    pre_prev_tok_string = pre_prev_tok[1]
    try:
        next_tok = tokens[index+1]
    except IndexError:  # Pretend it's a newline
        next_tok = (54, '\n', (1, 1), (1, 2), '#\n')
    next_tok_string = next_tok[1]
    # if token_string == "=":
    #     return '__skipline__'
    if prev_tok_string == '.' and pre_prev_tok_string in ('msg', 'abi', 'block', 'tx'):
        return None
    if prev_tok_string == '.' and token_string in ('balance', 'send', 'transfer'):
        return None
    #if token_string.startswith('__'):
    #    return None
    if next_tok_string == ".":
        if token_string in analyze.global_variable:
            return None
    #if prev_tok_string == 'import':
    #    return '__skipline__'
    # if prev_tok_string == ".":
    #     return '__skipnext__'
    if prev_tok_string in analyze.type_words:
        # declare variable
        return token_string
    if prev_tok_string in analyze.storageLocation_scope_words and \
            pre_prev_tok_string in analyze.type_words:
        # declare variable
        return token_string
    if token_string[0:5] == 'fixed' or token_string[0:6] == 'ufixed':
        return None
    if prev_tok_string[0:5] == 'fixed' or prev_tok_string[0:6] == 'ufixed':
        # declare variable
        return token_string
    if pre_prev_tok_string[0:5] == 'fixed' or pre_prev_tok_string[0:6] == 'ufixed':
        if prev_tok_string in analyze.storageLocation_scope_words:
            # declare variable
            return token_string
    # if token_string == ']' and prev_tok_string == '[':
    #     if next_tok_string in analyze.storageLocation_scope_words:
    #         return '__skipnext__'
    # if prev_tok_string == "for":
    #     if len(token_string) > 2:
    #         return token_string
    if token_string in analyze.reserved_words:
        return None
    # if token_string in keyword_args.keys():
    #     return None
    # if prev_tok_type != tokenize.INDENT and next_tok_string != '=':
    #     return '__skipline__'
    # if not ignore_length:
    #     if len(token_string) < 3:
    #         return None
    # if token_string in RESERVED_WORDS:
    #     return None
    return token_string
4,565
def concat_data(labelsfile, notes_file):
    """
    INPUTS:
        labelsfile: sorted by hadm id, contains one label per line
        notes_file: sorted by hadm id, contains one note per line
    """
    with open(labelsfile, 'r') as lf:
        print("CONCATENATING")
        with open(notes_file, 'r') as notesfile:
            outfilename = '%s/notes_labeled.csv' % MIMIC_3_DIR
            with open(outfilename, 'w') as outfile:
                w = csv.writer(outfile)
                w.writerow(['SUBJECT_ID', 'HADM_ID', 'TEXT', 'LABELS'])

                labels_gen = next_labels(lf)
                notes_gen = next_notes(notesfile)

                for i, (subj_id, text, hadm_id) in enumerate(notes_gen):
                    if i % 10000 == 0:
                        print(str(i) + " done")
                    cur_subj, cur_labels, cur_hadm = next(labels_gen)

                    if cur_hadm == hadm_id:
                        w.writerow([subj_id, str(hadm_id), text, ';'.join(cur_labels)])
                    else:
                        print("couldn't find matching hadm_id. data is probably not sorted correctly")
                        break
    return outfilename
4,566
def replace_text_comment(comments, new_text): """Replace "# text = " comment (if any) with one using new_text instead.""" new_text = new_text.replace('\n', ' ') # newlines cannot be represented new_text = new_text.strip(' ') new_comments, replaced = [], False for comment in comments: if comment.startswith('# text ='): new_comments.append('# text = {}'.format(new_text)) replaced = True else: new_comments.append(comment) if not replaced: new_comments.append('# text = {}'.format(new_text)) return new_comments
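# Usage sketch: the function is pure, so it can be exercised directly.
comments = ['# sent_id = 1', '# text = old sentence']
print(replace_text_comment(comments, 'new sentence\n'))
# -> ['# sent_id = 1', '# text = new sentence']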
4,567
def mkSmartMask(**kwargs):
    """Routine to produce sky maps, according to defined macro-bins.
    """
    logger.info('SmartMask production...')
    assert(kwargs['config'].endswith('.py'))
    get_var_from_file(kwargs['config'])
    macro_bins = data.MACRO_BINS
    mask_label = data.MASK_LABEL
    in_labels_list = data.IN_LABELS_LIST
    micro_bin_file = data.MICRO_BINS_FILE
    emin, emax, emean = get_energy_from_fits(micro_bin_file,
                                             minbinnum=macro_bins[0][0],
                                             maxbinnum=macro_bins[-1][1])
    E_MIN, E_MAX = emin[0], emax[-1]

    logger.info('Checking PSF file...')
    PSF_FILE = kwargs['psffile']
    if PSF_FILE is not None:
        logger.info('Using %s'%PSF_FILE)
    else:
        logger.info('ATT: File not found: %s'%PSF_FILE)
        logger.info('Creating PSF file with gtpsf...')
        try:
            IRFS = kwargs['irfs']
        except:
            logger.info('ERROR: provide IRFS!')
            sys.exit()
        try:
            EVTYPE = kwargs['evtype']
        except:
            logger.info('ERROR: provide event type!')
            sys.exit()
        try:
            LT_FILE = data.LT_FILE
        except:
            LT_FILE = ''
        if os.path.exists(LT_FILE):
            logger.info('Livetime file found.')
        else:
            try:
                LT_FILE = kwargs['ltfile']
            except:
                logger.info('ERROR: provide livetime file or list!')
                sys.exit()
        if LT_FILE.lower().endswith(('.txt', '.dat')):
            lt_dict = {'infile1' : LT_FILE,
                       'outfile' : 'DEFAULT',
                       'chatter': 4,
                       'clobber': 'no'}
            from Xgam.utils.ScienceTools_ import gtltsum
            label_lt = IRFS + '_evt' + str(EVTYPE)
            out_gtltsum = gtltsum(label_lt, lt_dict)
            expcube = out_gtltsum
        else:
            expcube = LT_FILE

        from Xgam.utils.ScienceTools_ import gtpsf
        label_psf = IRFS + '_evt' + str(EVTYPE)
        psf_dict = {'expcube' : expcube,
                    'outfile' : 'DEFAULT',
                    'irfs' : IRFS,
                    'evtype' : EVTYPE,
                    'ra' : 45.0,
                    'dec' : 45.0,
                    'emin' : E_MIN,
                    'emax' : E_MAX,
                    'nenergies' : 500,
                    'thetamax' : 30.0,
                    'ntheta' : 100,
                    'chatter': 4,
                    'clobber': 'no'}
        PSF_FILE = gtpsf(label_psf, psf_dict)

    # NOTE: IMPLEMENT TYPE2 ??
    if kwargs['typesrcmask'] == 1:
        from Xgam.utils.mkmask_ import mask_src_fluxPSFweighted_1 as mask_src
    else:
        from Xgam.utils.mkmask_ import mask_src_fluxPSFweighted_2 as mask_src
    from Xgam.utils.mkmask_ import mask_gp
    from Xgam.utils.parsing_ import get_psf_en_univariatespline

    nside = kwargs['nside']
    out_label = kwargs['outflabel']
    npix = hp.nside2npix(nside)
    src_cat = kwargs['srccat']
    src_ext_cat = kwargs['srcextcat']
    out_name_list = os.path.join(X_OUT, 'fits/MaskSmart_'+out_label+'_list.txt')
    out_list = []

    for i, (minb, maxb) in enumerate(macro_bins):
        logger.info('Considering bins from %i to %i...' %(minb, maxb))
        emin, emax, emean = get_energy_from_fits(micro_bin_file,
                                                 minbinnum=minb,
                                                 maxbinnum=maxb)
        E_MIN, E_MAX = emin[0], emax[-1]
        E_MEAN = np.sqrt(emax[0]*emin[-1])
        logger.info('Energies %.1f - %.1f [MeV]' %(E_MIN, E_MAX))

        out_name = os.path.join(X_OUT, 'fits/MaskSmart_'+out_label+'_%.1f_MeV.fits'%(E_MIN))
        out_list.append(out_name)
        if os.path.exists(out_name) and not kwargs['overwrite']:
            logger.info('ATT: %s already exists!\n'%out_name)
            # load the existing map so the optional display below still works
            mask = hp.read_map(out_name)
        else:
            bad_pix = []
            mask = np.ones(npix)
            bad_pix += mask_gp(kwargs['gpcut'], nside)
            psf_spline = get_psf_en_univariatespline(PSF_FILE)
            bad_pix += mask_src(src_cat, src_ext_cat, psf_spline, E_MIN, nside)

            for bpix in np.unique(bad_pix):
                mask[bpix] = 0
            if not os.path.exists(os.path.join(X_OUT, 'fits')):
                os.system('mkdir %s' %os.path.join(X_OUT, 'fits'))
            fsky = 1-(len(np.unique(bad_pix))/float(npix))
            logger.info('fsky = %.3f'%fsky)
            hp.write_map(out_name, mask, coord='G', overwrite=True)
            logger.info('Created %s \n' %out_name)

        if kwargs['show']:
            import matplotlib.pyplot as plt
            hp.mollview(mask, cmap='bone')
            plt.show()

    logger.info('Writing list of output files: %s'%out_name_list)
    np.savetxt(out_name_list, out_list, fmt='%s')
    logger.info('Done!')
4,568
def init_show_booking_loader(response, item=None): """ init ShowingBookingLoader with optional ShowingBooking item """ loader = ShowingBookingLoader(response=response) if item: loader.add_value(None, item) return loader
4,569
def main(args): """Entry point""" args.entry_point(args)
4,570
def get_dpifac(): """get user dpi, source: node_wrangler.py""" prefs = bpy.context.preferences.system return prefs.dpi * prefs.pixel_size / 72
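# Hedged usage sketch: only runs inside Blender, where bpy is available.
# Sizes in UI units are typically multiplied by this factor to get pixels.
dpifac = get_dpifac()
panel_width_px = 300 * dpifac  # 300 UI units scaled for the user's DPI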
4,571
def seq_row( repeats: int = 1, trigger: str = Trigger.IMMEDIATE, position: int = 0, half_duration: int = MIN_PULSE, live: int = 0, dead: int = 0, ) -> List: """Create a 50% duty cycle pulse with phase1 having given live/dead values""" row = [ repeats, trigger, position, # Phase1 half_duration, live, dead, 0, 0, 0, 0, # Phase2 half_duration, 0, 0, 0, 0, 0, 0, ] return row
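# Hedged usage sketch: build two sequencer rows. Trigger and MIN_PULSE are
# assumed to be defined in the same module as seq_row; values are illustrative.
rows = [
    seq_row(repeats=1, trigger=Trigger.IMMEDIATE, half_duration=MIN_PULSE, live=1),
    seq_row(repeats=5, position=100, half_duration=2 * MIN_PULSE, dead=1),
]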
4,572
def github_repos(message):
    """
    Return the list of the organization's repositories.
    """
    text = ""
    for repo in org.get_repos():
        text += "- <{}|{}> {}\n".format(repo.html_url, repo.name, repo.description)

    attachments = [{
        # pretext: "List of repositories in {organization}" (kept in Japanese
        # as the bot's user-facing output)
        'pretext': '{} のリポジトリ一覧'.format(settings.GITHUB_ORGANIZATION),
        'text': text,
        'mrkdwn_in': ['text'],
    }]
    botwebapi(message, attachments)
4,573
def call_nelder_mead_method(
    f,
    verts,
    x_tolerance=1e-6,
    y_tolerance=1e-6,
    computational_budget=1000,
    f_difference=10,
    calls=0,
    terminate_criterion=terminate_criterion_x,
    alpha=1,
    gamma=2,
    rho=0.5,
    sigma=0.5,
    values=None,
):
    """Return an approximation of a local optimum.

    Args:
        f: a real-valued function on R^n
        verts: an array of n+1 n-dimensional vectors (the start simplex)
        f_difference: the difference between the last and second-to-last best
            approximation
        calls: the number of evaluations of f so far
        terminate_criterion: the termination criterion we are using (a
            function that returns a boolean)
        x_tolerance: a positive real number
        y_tolerance: a positive real number
        computational_budget: an integer; the maximum number of function
            evaluations
        alpha, gamma, rho, sigma: positive real numbers that influence how
            the algorithm behaves
        values: previously evaluated function values, one per vertex

    Returns:
        out_1: an approximation of a local optimum of the function
        out_2: number of evaluations of f
    """
    # Pseudo code can be found on:
    # https://en.wikipedia.org/wiki/Nelder%E2%80%93Mead_method
    # 1 Order
    if values is None:  # None default avoids a shared mutable argument
        values = np.array([f(vert) for vert in verts])
        calls = calls + len(verts)
    indexes = np.argsort(values)
    # 2 Centroid of all vertices except the worst one
    x_0 = np.zeros(len(verts[0]))  # works in any dimension, not just n=2
    for index in indexes[:-1]:
        x_0 = x_0 + verts[index]
    x_0 = x_0 / (len(verts) - 1)
    x_r = x_0 + alpha * (x_0 - verts[indexes[-1]])
    x_e = x_0 + gamma * (x_r - x_0)
    x_c = x_0 + rho * (verts[indexes[-1]] - x_0)
    # Termination check; note the best vertex is rounded to integers
    if (
        terminate_criterion(verts, f_difference, x_tolerance, y_tolerance)
        or f_difference < y_tolerance
        or calls >= computational_budget
    ):
        return [np.array(np.round(verts[indexes[0]])), calls]
    # 3 Reflection
    f_x_r = f(x_r)
    calls += 1
    if values[indexes[0]] <= f_x_r:
        if f_x_r < values[indexes[-2]]:
            f_difference = abs(f_x_r - values[indexes[0]])
            values[indexes[-1]] = f_x_r
            return call_nelder_mead_method(
                f, nm_replace_final(verts, indexes, x_r), x_tolerance,
                y_tolerance, computational_budget, f_difference, calls,
                terminate_criterion, alpha, gamma, rho, sigma, values,
            )
    # 4 Expansion
    if f_x_r < values[indexes[0]]:
        f_x_e = f(x_e)
        calls += 1
        if f_x_e < f_x_r:
            f_difference = abs(f_x_e - values[indexes[0]])
            values[indexes[-1]] = f_x_e
            return call_nelder_mead_method(
                f, nm_replace_final(verts, indexes, x_e), x_tolerance,
                y_tolerance, computational_budget, f_difference, calls,
                terminate_criterion, alpha, gamma, rho, sigma, values,
            )
        else:
            f_difference = abs(f_x_r - values[indexes[0]])
            values[indexes[-1]] = f_x_r
            return call_nelder_mead_method(
                f, nm_replace_final(verts, indexes, x_r), x_tolerance,
                y_tolerance, computational_budget, f_difference, calls,
                terminate_criterion, alpha, gamma, rho, sigma, values,
            )
    # 5 Contraction
    f_x_c = f(x_c)
    calls += 1  # count this evaluation unconditionally
    if f_x_c < values[indexes[-1]]:  # compare against the stored worst value
        f_difference = abs(f_x_c - values[indexes[0]])
        values[indexes[-1]] = f_x_c
        return call_nelder_mead_method(
            f, nm_replace_final(verts, indexes, x_c), x_tolerance,
            y_tolerance, computational_budget, f_difference, calls,
            terminate_criterion, alpha, gamma, rho, sigma, values,
        )
    # 6 Shrink: all values except the best become stale, so pass values=None
    # to force re-evaluation in the recursive call
    return call_nelder_mead_method(
        f, nm_shrink(verts, indexes, sigma), x_tolerance, y_tolerance,
        computational_budget, f_difference, calls, terminate_criterion,
        alpha, gamma, rho, sigma, None,
    )
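# Hedged usage sketch: minimise a 2-D quadratic from a small start simplex.
# terminate_criterion_x, nm_replace_final and nm_shrink are module-level
# helpers assumed to exist alongside this function; note that the routine
# returns the best vertex *rounded* to integers, as the code above shows.
f = lambda x: (x[0] - 3) ** 2 + (x[1] + 1) ** 2
start_simplex = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0]])
optimum, n_calls = call_nelder_mead_method(f, start_simplex)
print(optimum, n_calls)  # expected: approximately [3, -1]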
4,574
def get_auth_claims_from_request(request): """Authenticates the request and returns claims about its authorizer. Oppia specifically expects the request to have a Subject Identifier for the user (Claim Name: 'sub'), and an optional custom claim for super-admin users (Claim Name: 'role'). Args: request: webapp2.Request. The HTTP request to authenticate. Returns: AuthClaims|None. Claims about the currently signed in user. If no user is signed in, then returns None. """ claims = _verify_id_token(request.headers.get('Authorization', '')) auth_id = claims.get('sub', None) email = claims.get('email', None) role_is_super_admin = ( claims.get('role', None) == feconf.FIREBASE_ROLE_SUPER_ADMIN) if auth_id: return auth_domain.AuthClaims(auth_id, email, role_is_super_admin) return None
4,575
def decrypt(**kwargs):
    """
    Returns a CryptoResult containing decrypted bytes.

    This function requires that 'data' is in the format generated by the
    encrypt functionality in this SDK as well as other OCI SDKs that support
    client side encryption.

    Note this function cannot decrypt data encrypted by the KMS 'encrypt' APIs.

    :param oci.encryption.MasterKeyProvider master_key_provider: (required)
        A MasterKeyProvider to use for decrypting the data.

    :param bytes data: (required)
        The data to be decrypted. If a string is passed, it will be converted to
        bytes using UTF-8 encoding. Note that this conversion will require creating
        a copy of the data which may be undesirable for large payloads.

    :rtype: oci.encryption.CryptoResult
    """
    _ensure_required_kwargs_present(required_kwargs=['master_key_provider', 'data'], provided_kwargs=kwargs)

    # leaves input alone if it is already bytes, otherwise converts to bytes using
    # the default encoding; this is for convenience of the caller, but will create
    # a copy of the data if it is not already a bytes-like object
    data = convert_to_bytes(kwargs.get('data'))

    # as long as we only read from the stream, BytesIO does not create a copy of
    # the data, so this doesn't add memory overhead
    with io.BytesIO(data) as stream_to_decrypt:
        decryptor = StreamDecryptor(
            stream_to_decrypt=stream_to_decrypt, master_key_provider=kwargs.get('master_key_provider')
        )

        return CryptoResult(data=decryptor.read(), encryption_context=decryptor.get_encryption_context())
4,576
def format_result(result: Union[Pose, PackedPose]) -> Tuple[str, Dict[Any, Any]]:
    """
    :param result: Pose or PackedPose object.
    :return: tuple of (pdb_string, metadata)

    Given a `Pose` or `PackedPose` object, return a tuple containing the pdb
    string and a scores dictionary.
    """
    _pdbstring = io.to_pdbstring(result)
    _scores_dict = io.to_dict(result)
    _scores_dict.pop("pickled_pose", None)

    return (_pdbstring, _scores_dict)
4,577
def str_array(listString):
    """
    Because the way that Python prints an array is different from CPLEX, this
    function produces the proper CPLEX representation of an array of strings.

    :param listString: A list of values
    :type listString: List[]
    :returns: The string representation of the array, in CPLEX format
    :rtype: String
    """
    if not listString:
        return "{}"
    ret = "{"
    for i in range(0, len(listString) - 1):
        ret = ret + "\"" + listString[i] + "\","
    # Close with the *last* element (the original used the stale loop index,
    # which duplicated the second-to-last entry and dropped the last one).
    ret = ret + "\"" + listString[-1] + "\"}"
    return ret
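# Usage sketch: CPLEX-style rendering of a list of strings.
print(str_array(["a", "b", "c"]))  # -> {"a","b","c"}
print(str_array([]))               # -> {}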
4,578
def read_gene_annos(phenoFile): """Read in gene-based metadata from an HDF5 file Args: phenoFile (str): filename for the relevant HDF5 file Returns: dictionary with feature annotations """ fpheno = h5py.File(phenoFile,'r') # Feature annotations: geneAnn = {} for key in fpheno['gene_info'].keys(): geneAnn[key] = fpheno['gene_info'][key][:] fpheno.close() return geneAnn
4,579
def update_rating_deviation(session_id, dict):
    """
    updates the rating_deviations of music genres in the database with new
    rating_deviations as a Json(dict)

    Args:
        session_id (int): id of a user's session
        dict (dict): dictionary of rating_deviations
    """
    cursor, connection = open_db_connection()
    # Use a parameterized query instead of f-string interpolation to avoid
    # SQL injection and quoting issues.
    cursor.execute(
        "update session_ranking_data set rating_deviation_dict = %s "
        "where session_id = %s",
        (Json(dict), session_id))
    close_db_connection(cursor, connection)
4,580
def cache(builds, cache_dir, ssl_verify=True): """ Download all the builds' artifacts into our cache directory. :param set builds: set of Build objects :param str cache_dir: path to a destination directory :param ssl_verify: verify HTTPS connection or not (default: True) """ session = requests.Session() session.verify = ssl_verify for build in builds: for binary in build.binaries: binary.download(cache_dir, session=session) for source in build.sources: source.download(cache_dir, session=session)
4,581
def hex_xformat_decode(s: str) -> Optional[bytes]: """ Reverse :func:`hex_xformat_encode`. The parameter is a hex-encoded BLOB like .. code-block:: none "X'CDE7A24B1A9DBA3148BCB7A0B9DA5BB6A424486C'" Original purpose and notes: - SPECIAL HANDLING for BLOBs: a string like ``X'01FF'`` means a hex-encoded BLOB. Titanium is rubbish at BLOBs, so we encode them as special string literals. - SQLite uses this notation: https://sqlite.org/lang_expr.html - Strip off the start and end and convert it to a byte array: http://stackoverflow.com/questions/5649407 """ if len(s) < 3 or not s.startswith("X'") or not s.endswith("'"): return None return binascii.unhexlify(s[2:-1])
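# Usage sketch: round-trip of the SQLite-style hex literal described above.
assert hex_xformat_decode("X'01FF'") == b'\x01\xff'
assert hex_xformat_decode("not a blob") is None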
4,582
def threshold(data, direction):
    r"""
    Find a suitable threshold value which maximizes explained variance
    of the data projected onto direction.

    NOTE: the chosen hyperplane would be described mathematically as
    $x \cdot \mathrm{direction} = \mathrm{threshold}$.
    """
    projected_data = np.inner(data, direction)
    sorted_x = np.sort(projected_data)
    best_sep_index = explained_variance_list(sorted_x).argmax()
    return (sorted_x[best_sep_index] + sorted_x[best_sep_index + 1]) / 2
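# Hedged usage sketch: two well-separated 1-D clusters projected onto the
# x-axis. Relies on explained_variance_list(), assumed defined in this
# module, so the exact value below is indicative only.
data = np.array([[0.0, 0.0], [0.1, 0.0], [5.0, 0.0], [5.1, 0.0]])
print(threshold(data, np.array([1.0, 0.0])))  # likely ~2.55, between clusters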
4,583
def StrType_any(*x): """ Ignores all parameters to return a StrType """ return StrType()
4,584
def _download(url, dest, timeout=30): """Simple HTTP/HTTPS downloader.""" # Optional import: requests is not needed for local big data setup. import requests dest = os.path.abspath(dest) with requests.get(url, stream=True, timeout=timeout) as r: with open(dest, 'w+b') as data: for chunk in r.iter_content(chunk_size=0x4000): data.write(chunk) return dest
4,585
def height(tree):
    """Return the height of tree."""
    if tree.is_empty():
        return 0
    else:
        return 1 + max(height(tree.left_child()),
                       height(tree.right_child()))
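# Hedged usage sketch with a minimal stand-in tree class; the real tree type
# is not shown in this snippet, so only the three methods used above are
# modelled, with empty subtrees represented by a sentinel node.
class _Node:
    def __init__(self, left=None, right=None):
        self._left, self._right = left, right
    def is_empty(self):
        return False
    def left_child(self):
        return self._left if self._left is not None else _Empty()
    def right_child(self):
        return self._right if self._right is not None else _Empty()

class _Empty(_Node):
    def is_empty(self):
        return True

print(height(_Node(_Node(_Node()))))  # -> 3 (a left-leaning chain of 3 nodes)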
4,586
def applyTelluric(model, tell_alpha=1.0, airmass=1.5, pwv=0.5):
    """
    Apply the telluric model on the science model.

    Parameters
    ----------
    model      : model object
                 BT Settl model
    tell_alpha : float
                 telluric scaling factor (the power on the telluric flux)
    airmass    : float
                 airmass at which the telluric model is interpolated
    pwv        : float
                 precipitable water vapor at which the telluric model is
                 interpolated

    Returns
    -------
    model : model object
            BT Settl model multiplied by the corresponding telluric model
    """
    # read in a telluric model spanning the science model's wavelength range
    wavelow = model.wave[0] - 10
    wavehigh = model.wave[-1] + 10
    telluric_model = smart.Model()
    telluric_model.wave, telluric_model.flux = smart.InterpTelluricModel(
        wavelow=wavelow, wavehigh=wavehigh, airmass=airmass, pwv=pwv)

    # apply the telluric alpha parameter
    telluric_model.flux = telluric_model.flux**(tell_alpha)

    # resample the telluric model onto the science model's wavelength grid
    # (the telluric model is assumed to have the finer grid, as in the
    # original) and multiply it into the science model
    telluric_model.flux = np.array(smart.integralResample(
        xh=telluric_model.wave, yh=telluric_model.flux, xl=model.wave))
    telluric_model.wave = model.wave
    model.flux *= telluric_model.flux

    return model
4,587
def max_power_rule(mod, g, tmp): """ **Constraint Name**: DAC_Max_Power_Constraint **Enforced Over**: DAC_OPR_TMPS Power consumption cannot exceed capacity. """ return ( mod.DAC_Consume_Power_MW[g, tmp] <= mod.Capacity_MW[g, mod.period[tmp]] * mod.Availability_Derate[g, tmp] )
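# Hedged usage sketch: as a Pyomo rule, this function would be attached to
# the model roughly as below; the set and constraint names follow the
# docstring, but the surrounding model construction is not shown here.
from pyomo.environ import Constraint

mod.DAC_Max_Power_Constraint = Constraint(mod.DAC_OPR_TMPS, rule=max_power_rule)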
4,588
async def list_subs(request: Request):
    """
    List all subscription objects
    """

    # Check for master token
    if request.headers.get("Master") != os.environ.get("MASTER_TOKEN"):
        raise HTTPException(
            status_code=status.HTTP_401_UNAUTHORIZED,
            detail="Invalid authentication token",
        )

    subs = await engine.find(Subscription)
    return subs
4,589
def check_archs( copied_libs, # type: Mapping[Text, Mapping[Text, Text]] require_archs=(), # type: Union[Text, Iterable[Text]] stop_fast=False, # type: bool ): # type: (...) -> Set[Union[Tuple[Text, FrozenSet[Text]], Tuple[Text, Text, FrozenSet[Text]]]] # noqa: E501 """Check compatibility of archs in `copied_libs` dict Parameters ---------- copied_libs : dict dict containing the (key, value) pairs of (``copied_lib_path``, ``dependings_dict``), where ``copied_lib_path`` is a library real path that has been copied during delocation, and ``dependings_dict`` is a dictionary with key, value pairs where the key is a path in the target being delocated (a wheel or path) depending on ``copied_lib_path``, and the value is the ``install_name`` of ``copied_lib_path`` in the depending library. require_archs : str or sequence, optional Architectures we require to be present in all library files in wheel. If an empty sequence, just check that depended libraries do have the architectures of the depending libraries, with no constraints on what these architectures are. If a sequence, then a set of required architectures e.g. ``['i386', 'x86_64']`` to specify dual Intel architectures. If a string, then a standard architecture name as returned by ``lipo -info``, or the string "intel", corresponding to the sequence ``['i386', 'x86_64']``, or the string "universal2", corresponding to ``['x86_64', 'arm64']``. stop_fast : bool, optional Whether to give up collecting errors after the first Returns ------- bads : set set of length 2 or 3 tuples. A length 2 tuple is of form ``(depending_lib, missing_archs)`` meaning that an arch in `require_archs` was missing from ``depending_lib``. A length 3 tuple is of form ``(depended_lib, depending_lib, missing_archs)`` where ``depended_lib`` is the filename of the library depended on, ``depending_lib`` is the library depending on ``depending_lib`` and ``missing_archs`` is a set of missing architecture strings giving architectures present in ``depending_lib`` and missing in ``depended_lib``. An empty set means all architectures were present as required. """ if isinstance(require_archs, str): require_archs = _ARCH_LOOKUP.get(require_archs, [require_archs]) require_archs_set = frozenset(require_archs) bads = ( [] ) # type: List[Union[Tuple[Text, FrozenSet[Text]], Tuple[Text, Text, FrozenSet[Text]]]] # noqa: E501 for depended_lib, dep_dict in copied_libs.items(): depended_archs = get_archs(depended_lib) for depending_lib, install_name in dep_dict.items(): depending_archs = get_archs(depending_lib) all_required = depending_archs | require_archs_set all_missing = all_required.difference(depended_archs) if len(all_missing) == 0: continue required_missing = require_archs_set.difference(depended_archs) if len(required_missing): bads.append((depending_lib, required_missing)) else: bads.append((depended_lib, depending_lib, all_missing)) if stop_fast: return set(bads) return set(bads)
4,590
def create_data_table(headers, columns, match_tol=20) -> pd.DataFrame:
    """Based on headers and column data, create the data table."""
    # Store the top y values of all of the row headers
    header_tops = np.array([h.top for h in headers])

    # Set up the grid: nrows by ncols
    nrows = len(headers)
    ncols = len(columns) + 1

    # Initialize the grid
    grid = np.empty((nrows, ncols), dtype=object)
    grid[:, :] = ""  # Default value

    # Add in the headers
    grid[:, 0] = [h.text for h in headers]

    # Loop over each column
    for col_num, xval in enumerate(columns):
        col = columns[xval]
        word_tops = np.array([w.top for w in col])

        # Find the closest row header
        for row_num, h in enumerate(headers):
            # Find the closest word to this row header
            word_diff = np.abs(word_tops - h.top)
            word_diff[word_diff > match_tol] = np.nan

            # Make sure the row header is vertically close enough
            if np.isnan(word_diff).sum() < len(word_diff):
                # Get the matching word for this row header
                notnull = ~np.isnan(word_diff)
                order = np.argsort(word_diff[notnull])
                for word_index in np.where(notnull)[0][order]:
                    word = col[word_index]

                    # IMPORTANT: make sure this is the closest row header
                    # Sometimes words will match to more than one header
                    header_diff = np.abs(header_tops - word.top)
                    header_index = np.argmin(header_diff)
                    closest_header = headers[header_index]
                    if closest_header == h:
                        grid[row_num, col_num + 1] = col[word_index].text
                        break

    return pd.DataFrame(grid)
4,591
def interp2d_vis(model, model_lsts, model_freqs, data_lsts, data_freqs,
                 flags=None, kind='cubic', flag_extrapolate=True,
                 medfilt_flagged=True, medfilt_window=(3, 7), fill_value=None):
    """
    Interpolate complex visibility model onto the time & frequency basis of
    a data visibility. See below for notes on flag propagation if flags is provided.

    Parameters:
    -----------
    model : type=DataContainer, holds complex visibility for model
        keys are antenna-pair + pol tuples, values are 2d complex visibility
        with shape (Ntimes, Nfreqs).

    model_lsts : 1D array of the model time axis, dtype=float, shape=(Ntimes,)

    model_freqs : 1D array of the model freq axis, dtype=float, shape=(Nfreqs,)

    data_lsts : 1D array of the data time axis, dtype=float, shape=(Ntimes,)

    data_freqs : 1D array of the data freq axis, dtype=float, shape=(Nfreqs,)

    flags : type=DataContainer, dictionary containing model flags. Can also
        contain model wgts as floats and will convert to booleans appropriately.

    kind : type=str, kind of interpolation, options=['linear', 'cubic', 'quintic']

    medfilt_flagged : type=bool, if True, before interpolation, replace flagged
        pixels with output from a median filter centered on each flagged pixel.

    medfilt_window : type=tuple, extent of window for median filter across the
        (time, freq) axes. Even numbers are rounded down to odd number.

    flag_extrapolate : type=bool, flag extrapolated data_lsts if True.

    fill_value : type=float, if fill_value is None, extrapolated points are
        extrapolated, else they are filled with fill_value.

    Output: (new_model, new_flags)
    -------
    new_model : interpolated model, type=DataContainer
    new_flags : flags associated with interpolated model, type=DataContainer

    Notes:
    ------
    If the data has flagged pixels, it is recommended to turn medfilt_flagged
    to True. This runs a median filter on the flagged pixels and replaces
    their values with the results, but they remain flagged. This happens
    *before* interpolation. This means that interpolation near flagged pixels
    isn't significantly biased by their presence.

    In general, if flags are fed, flags are propagated if a flagged pixel is
    a nearest neighbor of an interpolated pixel.
    """
    # initialize output containers
    new_model = odict()
    new_flags = odict()

    # get nearest neighbor points
    freq_nn = np.array(list(map(lambda x: np.argmin(np.abs(model_freqs - x)), data_freqs)))
    time_nn = np.array(list(map(lambda x: np.argmin(np.abs(model_lsts - x)), data_lsts)))
    freq_nn, time_nn = np.meshgrid(freq_nn, time_nn)

    # get model indices meshgrid
    mod_F, mod_L = np.meshgrid(np.arange(len(model_freqs)), np.arange(len(model_lsts)))

    # raise warning on flags
    if flags is not None and medfilt_flagged is False:
        print("Warning: flags are fed, but medfilt_flagged=False. \n"
              "This may cause weird behavior of interpolated points near flagged data.")

    # ensure flags are booleans
    if flags is not None:
        if np.issubdtype(flags[list(flags.keys())[0]].dtype, np.floating):
            flags = DataContainer(odict(list(map(lambda k: (k, ~flags[k].astype(bool)), flags.keys()))))

    # loop over keys
    for i, k in enumerate(list(model.keys())):
        # get model array
        m = model[k]

        # get real and imag separately
        real = np.real(m)
        imag = np.imag(m)

        # median filter flagged data if desired
        if medfilt_flagged and flags is not None:
            # get extent of window along freq and time
            f_ext = int((medfilt_window[1] - 1) / 2.)
            t_ext = int((medfilt_window[0] - 1) / 2.)

            # set flagged data to nan
            real[flags[k]] = np.nan
            imag[flags[k]] = np.nan

            # get flagged indices
            f_indices = mod_F[flags[k]]
            l_indices = mod_L[flags[k]]

            # construct fill arrays
            real_fill = np.empty(len(f_indices), float)
            imag_fill = np.empty(len(f_indices), float)

            # iterate over flagged data and replace w/ medfilt
            for j, (find, tind) in enumerate(zip(f_indices, l_indices)):
                tlow, thi = tind - t_ext, tind + t_ext + 1
                flow, fhi = find - f_ext, find + f_ext + 1
                ll = 0
                while True:
                    # iterate until window has non-flagged data in it
                    # with a max of 10 iterations
                    if tlow < 0:
                        tlow = 0
                    if flow < 0:
                        flow = 0
                    r_med = np.nanmedian(real[tlow:thi, flow:fhi])
                    i_med = np.nanmedian(imag[tlow:thi, flow:fhi])
                    tlow -= 2
                    thi += 2
                    flow -= 2
                    fhi += 2
                    ll += 1
                    if not (np.isnan(r_med) or np.isnan(i_med)):
                        break
                    if ll > 10:
                        break
                real_fill[j] = r_med
                imag_fill[j] = i_med

            # fill real and imag
            real[l_indices, f_indices] = real_fill
            imag[l_indices, f_indices] = imag_fill

            # flag residual nans
            resid_nans = np.isnan(real) + np.isnan(imag)
            flags[k] += resid_nans

            # replace residual nans
            real[resid_nans] = 0.0
            imag[resid_nans] = 0.0

        # propagate flags to nearest neighbor
        if flags is not None:
            f = flags[k][time_nn, freq_nn]
            # check f is boolean type
            if np.issubdtype(f.dtype, np.floating):
                f = ~(f.astype(bool))
        else:
            f = np.zeros_like(real, bool)

        # interpolate
        interp_real = interpolate.interp2d(model_freqs, model_lsts, real, kind=kind, copy=False,
                                           bounds_error=False, fill_value=fill_value)(data_freqs, data_lsts)
        interp_imag = interpolate.interp2d(model_freqs, model_lsts, imag, kind=kind, copy=False,
                                           bounds_error=False, fill_value=fill_value)(data_freqs, data_lsts)

        # flag extrapolation if desired
        if flag_extrapolate:
            time_extrap = np.where((data_lsts > model_lsts.max() + 1e-6) | (data_lsts < model_lsts.min() - 1e-6))
            freq_extrap = np.where((data_freqs > model_freqs.max() + 1e-6) | (data_freqs < model_freqs.min() - 1e-6))
            f[time_extrap, :] = True
            f[:, freq_extrap] = True

        # rejoin
        new_model[k] = interp_real + 1j * interp_imag
        new_flags[k] = f

    return DataContainer(new_model), DataContainer(new_flags)
4,592
def eps_divide(n, d, eps=K.epsilon()):
    """Perform element-wise division, adding `eps` to both numerator and
    denominator so that zero denominators do not produce NaNs or Infs."""
    return (n + eps) / (d + eps)
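# Usage sketch: safe element-wise ratio; adding eps to both terms keeps 0/0
# finite (it evaluates to ~1) instead of producing NaN.
import numpy as np

print(eps_divide(np.array([1.0, 0.0]), np.array([2.0, 0.0])))
# -> approximately [0.5, 1.0]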
4,593
def generate_preflib_election(experiment=None, model=None, name=None, num_voters=None, num_candidates=None, folder=None, selection_method='random'): """ main function: generate elections""" votes = generate_votes_preflib(model, selection_method=selection_method, num_voters=num_voters, num_candidates=num_candidates, folder=folder) path = os.path.join("experiments", experiment.experiment_id, "elections", name + ".soc") file_ = open(path, 'w') file_.write(str(num_candidates) + "\n") for i in range(num_candidates): file_.write(str(i) + ', c' + str(i) + "\n") c = Counter(map(tuple, votes)) counted_votes = [[count, list(row)] for row, count in c.items()] counted_votes = sorted(counted_votes, reverse=True) file_.write(str(num_voters) + ', ' + str(num_voters) + ', ' + str(len(counted_votes)) + "\n") for i in range(len(counted_votes)): file_.write(str(counted_votes[i][0]) + ', ') for j in range(num_candidates): file_.write(str(counted_votes[i][1][j])) if j < num_candidates - 1: file_.write(", ") else: file_.write("\n") file_.close()
4,594
def display_multi_grid(kline_settings={}):
    """Display multiple K-line (candlestick) charts in a grid layout."""
    from vnpy.trader.ui import create_qapp
    qApp = create_qapp()

    w = GridKline(kline_settings=kline_settings)
    w.showMaximized()

    sys.exit(qApp.exec_())
4,595
def test_invalid_interface(): """An invalid interface should raise a 'ValueError'.""" psutil.net_if_addrs = MagicMock(return_value=test_net_if_addrs) with pytest.raises(ValueError): interface_subnets('invalid')
4,596
def bootstrapping_state_tomography(data_dict, keys_in, store_rhos=False,
                                   verbose=False, **params):
    """
    Computes bootstrapping statistics of the density matrix fidelity.
    :param data_dict: OrderedDict containing thresholded shots specified by
        keys_in, and where processed results will be stored
    :param keys_in: list of key names or dictionary keys paths in data_dict
        for the data to be analyzed (expects thresholded shots)
    :param store_rhos: whether to store the density matrices in addition to
        the bootstrapping fidelities.
    :param verbose: whether to show progress print statements
    :param params: keyword arguments
        Expects to find either in data_dict or in params:
            - Nbstrp: int specifying the number of bootstrapping cycles, i.e.
                sample size for estimating errors, the number of times the
                raw data is resampled
            - timestamps: list with the timestamps of the state tomo msmt
    :return: stores in data_dict:
        - {estimation_type}.bootstrapping_fidelities
        - (optionally) {estimation_type}.bootstrapping_rhos
        for estimation_type in estimation_types

    Assumptions:
        - CURRENTLY ONLY SUPPORTS DATA FROM HDF FILES!
        - !! This function calls state_tomography_analysis so all required
            input params needed there must also be here
    """
    Nbstrp = hlp_mod.get_param('Nbstrp', data_dict, raise_error=True, **params)
    data_to_proc_dict = hlp_mod.get_data_to_process(data_dict, keys_in)

    prep_params = hlp_mod.get_param('preparation_params', data_dict,
                                    default_value={}, **params)
    preselection = prep_params.get('preparation_type', 'wait') == 'preselection'
    n_readouts = hlp_mod.get_param('n_readouts', data_dict, raise_error=True,
                                   **params)
    raw_data = np.concatenate([np.reshape(arr, (len(arr), 1)) for
                               arr in data_to_proc_dict.values()], axis=1)
    n_shots = len(raw_data[:, 1]) // n_readouts

    timestamp = hlp_mod.get_param('timestamps', data_dict, raise_error=True,
                                  **params)
    if len(timestamp) > 1:
        raise ValueError(f'Bootstrapping can only be done for one data file. '
                         f'{len(timestamp)} timestamps were found.')

    data_dict_temp = {}
    hlp_mod.add_param('cal_points',
                      hlp_mod.get_param('cal_points', data_dict, **params),
                      data_dict_temp)
    hlp_mod.add_param('meas_obj_value_names_map',
                      hlp_mod.get_param('meas_obj_value_names_map', data_dict,
                                        **params),
                      data_dict_temp)
    hlp_mod.add_param('preparation_params',
                      hlp_mod.get_param('preparation_params', data_dict,
                                        **params),
                      data_dict_temp)
    hlp_mod.add_param('rho_target',
                      hlp_mod.get_param('rho_target', data_dict),
                      data_dict_temp)
    data_dict_temp = dat_extr_mod.extract_data_hdf(timestamps=timestamp,
                                                   data_dict=data_dict_temp)

    estimation_types = hlp_mod.get_param(
        'estimation_types', data_dict,
        default_value=('least_squares', 'max_likelihood'), **params)
    fidelities = {est_type: np.zeros(Nbstrp) for est_type in estimation_types}
    if store_rhos:
        rhos = {est_type: Nbstrp*[''] for est_type in estimation_types}

    params.pop('do_plotting', False)
    params.pop('prepare_plotting', False)
    replace_value = params.pop('replace_value', False)

    # do bootstrapping Nbstrp times
    for n in range(Nbstrp):
        if verbose:
            print('Bootstrapping run state tomo: ', n)
        sample_i = bootstrapping(measured_data=raw_data,
                                 n_readouts=n_readouts,
                                 n_shots=n_shots,
                                 preselection=preselection)
        for i, keyi in enumerate(data_to_proc_dict):
            hlp_mod.add_param(keyi, sample_i[:, i], data_dict_temp,
                              add_param_method='replace')
        state_tomography_analysis(data_dict_temp, keys_in=keys_in,
                                  do_plotting=False, prepare_plotting=False,
                                  replace_value=True, **params)
        for estimation_type in estimation_types:
            fidelities[estimation_type][n] = hlp_mod.get_param(
                f'{estimation_type}.fidelity', data_dict_temp,
                raise_error=True)
            if store_rhos:
                rhos[estimation_type][n] = hlp_mod.get_param(
                    f'{estimation_type}.rho', data_dict_temp,
                    raise_error=True)

    params['replace_value'] = replace_value
    hlp_mod.add_param('Nbstrp', Nbstrp, data_dict, **params)
    for estimation_type in fidelities:
        hlp_mod.add_param(f'{estimation_type}.bootstrapping_fidelities',
                          fidelities[estimation_type], data_dict, **params)
        if store_rhos:
            hlp_mod.add_param(f'{estimation_type}.bootstrapping_rhos',
                              rhos[estimation_type], data_dict, **params)
4,597
def get_mc_uuid(username): """Gets the Minecraft UUID for a username""" url = f"https://api.mojang.com/users/profiles/minecraft/{username}" res = requests.get(url) if res.status_code == 204: raise ValueError("Users must have a valid MC username") else: return res.json().get("id")
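# Usage sketch: resolve a (hypothetical) username, handling the error the
# function raises for names that do not exist.
try:
    print(get_mc_uuid("SomePlayerName"))  # hypothetical username
except ValueError:
    print("no such Minecraft user")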
4,598
def _resolve_credentials(fqdn, login): """Look up special forms of credential references.""" result = login if "$" in result: result = os.path.expandvars(result) if result.startswith("netrc:"): result = result.split(':', 1)[1] if result: result = os.path.abspath(os.path.expanduser(result)) accounts = netrc.netrc(result or None) account = accounts.authenticators(fqdn) if not account or not(account[0] or account[1]): raise dputhelper.DputUploadFatalException("Cannot find account for host %s in %s netrc file" % ( fqdn, result or "default")) # account is (login, account, password) user, pwd = account[0] or account[1], account[2] or "" result = "%s:%s" % (user, pwd) else: if result.startswith("file:"): result = os.path.abspath(os.path.expanduser(result.split(':', 1)[1])) with closing(open(result, "r")) as handle: result = handle.read().strip() try: user, pwd = result.split(':', 1) except ValueError: user, pwd = result, "" trace("Resolved login credentials to %(user)s:%(pwd)s", user=user, pwd='*' * len(pwd)) return result
4,599