Columns: code (string, 4 to 4.48k characters), docstring (string, 1 to 6.45k characters), _id (string, 24 characters)
def set_width(self, width): <NEW_LINE> <INDENT> self._width = width if width > self._width else self._width
Set the width of this Column. This method can only enlarge the width of the column: if the parameter width is smaller than the member variable width, then this method has no effect. @param width the new width
625941b5090684286d50eada
def create_df(genes): <NEW_LINE> <INDENT> df_dict = {} <NEW_LINE> for name, gene in genes.items(): <NEW_LINE> <INDENT> df_dict[name] = {'Mat_Counts': gene.mat_counts, 'Pat_Counts': gene.pat_counts, 'N_Counts': gene.n_counts, 'Others': gene.other_counts, 'SNPs': len(gene), 'Winner': gene.win, 'pval': gene.pval, 'Mat_wins': gene.mat_win, 'Pat_wins': gene.pat_win, 'Not_Sig': gene.not_sig, 'Weird': gene.weird, 'Failed': gene.failed} <NEW_LINE> <DEDENT> column_order=['Mat_Counts', 'Pat_Counts', 'N_Counts', 'Others', 'SNPs', 'Winner', 'pval', 'Mat_wins', 'Pat_wins', 'Not_Sig', 'Weird', 'Failed'] <NEW_LINE> df = pandas.DataFrame.from_dict(df_dict, orient='index') <NEW_LINE> df.index.name = 'GENE' <NEW_LINE> df = df[column_order] <NEW_LINE> return df
Make a pandas dataframe from a dictionary of genes. Dataframe has the following columns:: Counts:: 'Mat_Counts' -- Total number of maternal counts for this gene 'Pat_Counts' -- Total number of paternal counts for this gene 'N_Counts' -- Total number of reads with N in the SNP position 'Others' -- Total number of reads with a non-parental allele Gene-level summary:: 'Winner' -- The overall winner ('mat' or 'pat') 'pval' -- The pvalue of that association (binomial) SNP-level information:: 'SNPs' -- Total number of SNPs in this gene 'Mat_wins' -- Total number of SNPs with maternal wins 'Pat_wins' -- Total number of SNPs with paternal wins 'Not_Sig' -- Total number of SNPs that weren't significant 'Weird' -- Total number of SNPs with non-parental allele 'Failed' -- Total number of SNPs that failed for some reason (usually due to Ns in the sequence)
625941b5ad47b63b2c509d86
def __init__(self, *args, **kwargs): <NEW_LINE> <INDENT> _simuPOP_op.CombinedSplitter_swiginit(self, _simuPOP_op.new_CombinedSplitter(*args, **kwargs))
Usage: CombinedSplitter(splitters=[], vspMap=[], names=[]) Details: Create a combined splitter using a list of splitters. For example, CombinedSplitter([SexSplitter(), AffectionSplitter()]) defines a combined splitter with four VSPs, defined by male (vsp 0), female (vsp 1), unaffected (vsp 2) and affected individuals (vsp 3). Optionally, a new set of VSPs could be defined by parameter vspMap. Each item in this parameter is a list of VSPs that will be combined to a single VSP. For example, vspMap=[(0, 2), (1, 3)] in the previous example will define two VSPs defined by male or unaffected, and female or affected individuals. VSP names are usually determined by splitters, but can also be specified using parameter names.
625941b55510c4643540f1f4
def contains(self, value): <NEW_LINE> <INDENT> node = self.head <NEW_LINE> while node is not None: <NEW_LINE> <INDENT> if node.data == value: <NEW_LINE> <INDENT> return True <NEW_LINE> <DEDENT> node = node.next <NEW_LINE> <DEDENT> return False
Checks if any node contains value Args: value: The value to look for Returns: True if value is in the list, False otherwise
625941b5b7558d58953c4d17
def getUntrustedCertificateCause(self, url): <NEW_LINE> <INDENT> if url is None: <NEW_LINE> <INDENT> return None <NEW_LINE> <DEDENT> if not SSLUtil.isSSLCertificatesVerificationEnabled(): <NEW_LINE> <INDENT> return None <NEW_LINE> <DEDENT> try: <NEW_LINE> <INDENT> r = requests.get(url, verify=True, timeout=self.timeout) <NEW_LINE> <DEDENT> except requests.exceptions.SSLError as e: <NEW_LINE> <INDENT> msg = str(e) <NEW_LINE> self.logger.warn('Untrusted Certificate Cause: %s', msg) <NEW_LINE> p = re.compile( '.*\\[SSL: (?P<constant>[A-Z_]+)\\] (?P<message>[^\\(]+)' ) <NEW_LINE> m = p.match(msg) <NEW_LINE> if m is not None: <NEW_LINE> <INDENT> if m.groups('message') is not None: <NEW_LINE> <INDENT> return m.groups('message')[0].strip() <NEW_LINE> <DEDENT> <DEDENT> if msg.rfind(':') > 0: <NEW_LINE> <INDENT> msg = msg[(msg.rfind(':')+1):] <NEW_LINE> <DEDENT> return msg <NEW_LINE> <DEDENT> except Exception as e: <NEW_LINE> <INDENT> return str(e) <NEW_LINE> <DEDENT> return None
Getting untrusted certificate cause
625941b52ae34c7f2600cf2d
def load_data_from_redis(self, key=None): <NEW_LINE> <INDENT> if self.connect is None or self.connect.ping() is False: <NEW_LINE> <INDENT> self.logger.error("cannot connect to redis, rebuild connection...") <NEW_LINE> self.logger.error("Please try again.") <NEW_LINE> self.connect = self.__connect_redis() <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> data = self.connect.get(name=key) <NEW_LINE> if data is not None: <NEW_LINE> <INDENT> data = json.loads(data) <NEW_LINE> return data <NEW_LINE> <DEDENT> <DEDENT> return None
Load the query subgraph data from Redis. :return:
625941b5e1aae11d1e749aae
def check_outlier(dataframe, col_name): <NEW_LINE> <INDENT> low_limit, up_limit = outlier_thresholds(dataframe, col_name, 0.05, 0.95) <NEW_LINE> if dataframe[(dataframe[col_name] > up_limit) | (dataframe[col_name] < low_limit)].any(axis=None): <NEW_LINE> <INDENT> return True <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return False
This function checks whether a column of a dataframe has outliers or not, according to the results of the outlier_thresholds function. Parameters ---------- dataframe: pandas.core.frame.DataFrame DataFrame that we want to check for outlier values. col_name: str Column name to check for outlier values. Returns ------- bool True if the column contains outliers, False otherwise.
625941b5cdde0d52a9e52e29
def densenet121(mask_init='1s', mask_scale=1e-2, threshold_fn='binarizer', **kwargs): <NEW_LINE> <INDENT> model = DenseNet(mask_init, mask_scale, threshold_fn, num_init_features=64, growth_rate=32, block_config=(6, 12, 24, 16), **kwargs) <NEW_LINE> return model
Densenet-121 model from `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_
625941b5460517430c393f8b
def _sync_user(self, request, user): <NEW_LINE> <INDENT> pass
Called after a user is fetched/created and syncs any additional properties from the JWT's payload to the user object.
625941b55166f23b2e1a4f54
def help(self): <NEW_LINE> <INDENT> sys.stdout.write(self.usage()) <NEW_LINE> sys.stdout.write('\n')
Prints the usage
625941b5099cdd3c635f0a58
def betterEvaluationFunction(currentGameState): <NEW_LINE> <INDENT> util.raiseNotDefined()
Your extreme ghost-hunting, pellet-nabbing, food-gobbling, unstoppable evaluation function. DESCRIPTION: <write something here so we know what you did>
625941b53346ee7daa2b2b64
def __init__(self,X,Y,x,y): <NEW_LINE> <INDENT> checkVectorSpace("X",X) <NEW_LINE> checkVectorSpace("Y",Y) <NEW_LINE> Optizelle.Unconstrained.State.allocateVectors(self,X,x) <NEW_LINE> allocateVectors(self,X,Y,x,y) <NEW_LINE> EqualityConstrainedStateCreate(self,X,Y,x,y)
Constructor
625941b50fa83653e4656db9
def download_and_unzip_data(self, destination): <NEW_LINE> <INDENT> logger.debug("Requesting {}".format(self.url)) <NEW_LINE> response = requests.get(self.url) <NEW_LINE> archive = zipfile.ZipFile(io.BytesIO(response.content)) <NEW_LINE> logger.debug("Extracting archive into {}".format(str(destination))) <NEW_LINE> archive.extractall(path=destination)
Download and extract zipped shapefiles into the destination directory
625941b5de87d2750b85fb89
def atomic_number(symbol): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> return atomic_symbols.index(symbol.title()) <NEW_LINE> <DEDENT> except: <NEW_LINE> <INDENT> return None
Returns the atomic number of an element, given its atomic symbol Parameters: ----------- * symbol: The atomic symbol of the element whose atomic number is being requested. This is a 1 or 2 character string, e.g. 'H', 'Si', etc. It is case insensitive and leading or trailing blanks are ignored. Outputs: -------- This function returns the atomic number of the input element. If an invalid atomic symbol is input then the function returns None.
625941b563b5f9789fde6ee1
def list_dataset_for_blobname(cursor, blobname): <NEW_LINE> <INDENT> sql = "SELECT ID, DATETIME, STORAGE_ID FROM BLOBS WHERE BLOBNAME = :filename" <NEW_LINE> param = {'filename': blobname} <NEW_LINE> cursor.execute(sql, param) <NEW_LINE> data = [ { 'id': x[0], 'datatime': x[1], 'storage_id':x[2] } for x in cursor.fetchall()] <NEW_LINE> return data
Lists all records in the database that were stored under the given blob name. Arguments: cursor -- database cursor blobname -- name under which the requested blob is stored. Returns: A list of dictionaries containing the entries that were found.
625941b54c3428357757c126
def user_exists(username): <NEW_LINE> <INDENT> return username in pending_users or self.user_data(username) is not None
Helper function that checks if a user already exists
625941b566673b3332b91e92
def get_members_ordered_by_explore_point(server_id, char_id, names=True, limit=None): <NEW_LINE> <INDENT> docs = MongoUnionMember.db(server_id).find( {'explore_point': {'$gt': 0}} ).sort('explore_point', -1) <NEW_LINE> result = [] <NEW_LINE> self_info = None <NEW_LINE> for index, doc in enumerate(docs): <NEW_LINE> <INDENT> rank = index + 1 <NEW_LINE> if limit and rank > limit: <NEW_LINE> <INDENT> break <NEW_LINE> <DEDENT> obj = _ExploreMember() <NEW_LINE> obj.rank = rank <NEW_LINE> obj.id = doc['_id'] <NEW_LINE> obj.explore_point = doc['explore_point'] <NEW_LINE> result.append(obj) <NEW_LINE> if doc['_id'] == char_id: <NEW_LINE> <INDENT> self_info = obj <NEW_LINE> <DEDENT> <DEDENT> if names: <NEW_LINE> <INDENT> char_ids = [r.id for r in result] <NEW_LINE> names = batch_get_club_property(server_id, char_ids, 'name') <NEW_LINE> for r in result: <NEW_LINE> <INDENT> r.name = names[r.id] <NEW_LINE> <DEDENT> <DEDENT> if char_id == 0: <NEW_LINE> <INDENT> return result, None <NEW_LINE> <DEDENT> if self_info: <NEW_LINE> <INDENT> return result, self_info <NEW_LINE> <DEDENT> doc = MongoUnionMember.db(server_id).find_one({'_id': char_id}, {'explore_point': 1}) <NEW_LINE> if not doc: <NEW_LINE> <INDENT> return result, None <NEW_LINE> <DEDENT> self_info = _ExploreMember() <NEW_LINE> self_info.id = char_id <NEW_LINE> self_info.name = get_club_property(server_id, char_id, 'name') <NEW_LINE> self_info.explore_point = doc.get('explore_point', 0) <NEW_LINE> if not self_info.explore_point: <NEW_LINE> <INDENT> self_info.rank = 0 <NEW_LINE> return result, self_info <NEW_LINE> <DEDENT> rank = MongoUnionMember.db(server_id).find( {'explore_point': {'$gt': self_info.explore_point}} ).count() <NEW_LINE> self_info.rank = rank <NEW_LINE> return result, self_info
:rtype: (list[_ExploreMember], _ExploreMember | None)
625941b5377c676e91271fa6
@Files.route("/image/<objectid:file_id>", defaults={'size': None}, methods=['GET']) <NEW_LINE> @Files.route("/image/<objectid:file_id>/<string:size>", methods=['GET']) <NEW_LINE> @require_token() <NEW_LINE> def process_image_request(file_id, size): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> sizes = {'small': (140, 100), 'medium': (400, 300), 'large': (1200, 1000) } <NEW_LINE> col = app.data.driver.db['files'] <NEW_LINE> image = col.find_one({'_id': ObjectId(file_id)}) <NEW_LINE> grid_fs = GridFS(app.data.driver.db) <NEW_LINE> if not grid_fs.exists(_id=image['file']): <NEW_LINE> <INDENT> return eve_abort(500, 'No file system found') <NEW_LINE> <DEDENT> im_stream = grid_fs.get_last_version(_id=image['file']) <NEW_LINE> im = Image.open(im_stream) <NEW_LINE> if size != 'original': <NEW_LINE> <INDENT> im.thumbnail(sizes[size], Image.ANTIALIAS) <NEW_LINE> <DEDENT> img_io = io.BytesIO() <NEW_LINE> im.save(img_io, 'PNG', quality=100) <NEW_LINE> img_io.seek(0) <NEW_LINE> encoded_img = base64.b64encode(img_io.read()) <NEW_LINE> dict = {'mimetype': 'image/png', 'encoding': 'base64', 'src': encoded_img } <NEW_LINE> return jsonify(**dict) <NEW_LINE> <DEDENT> except Exception as e: <NEW_LINE> <INDENT> pass <NEW_LINE> <DEDENT> return eve_abort(404, 'Image not found or errors processing')
Resizes images to size and returns a base64 encoded string representing the image
625941b591af0d3eaac9b80f
def format_data(self, value): <NEW_LINE> <INDENT> return self.__call__(value)
Return the full string representation of the value with the position unspecified.
625941b560cbc95b062c6344
def __init__(self, command_targets, load_targets, blade_path, working_dir, build_dir, blade_root_dir, blade_options, command): <NEW_LINE> <INDENT> self.__command_targets = command_targets <NEW_LINE> self.__load_targets = load_targets <NEW_LINE> self.__blade_path = blade_path <NEW_LINE> self.__working_dir = working_dir <NEW_LINE> self.__build_dir = build_dir <NEW_LINE> self.__root_dir = blade_root_dir <NEW_LINE> self.__options = blade_options <NEW_LINE> self.__command = command <NEW_LINE> self.__current_source_path = blade_root_dir <NEW_LINE> self.__blade_revision = None <NEW_LINE> self.__direct_targets = [] <NEW_LINE> self.__expanded_command_targets = [] <NEW_LINE> self.__target_database = {} <NEW_LINE> self.__build_targets = {} <NEW_LINE> self.__sorted_targets_keys = [] <NEW_LINE> self.__targets_expanded = False <NEW_LINE> self.__build_time = time.time() <NEW_LINE> self.__build_toolchain = ToolChain() <NEW_LINE> self.build_accelerator = BuildAccelerator(self.__root_dir, self.__build_toolchain) <NEW_LINE> self.__build_jobs_num = 0 <NEW_LINE> self.__test_jobs_num = 0 <NEW_LINE> self.svn_root_dirs = [] <NEW_LINE> self._verify_history_path = os.path.join(build_dir, '.blade_verify.json') <NEW_LINE> self._verify_history = { 'header_inclusion_dependencies': {}, } <NEW_LINE> self.__build_script = os.path.join(self.__build_dir, 'build.ninja') <NEW_LINE> self.__all_rule_names = []
init method. Args: command_targets: List[str], target patterns are specified in command line. load_targets: List[str], target patterns should be loaded from workspace. It usually should be same as the command_targets, but in query dependents mode, all targets should be loaded. blade_path: str, the path of the `blade` python module, used to be called by builtin tools.
625941b550485f2cf553cb94
def destroy(self): <NEW_LINE> <INDENT> self.destroy_catalogue() <NEW_LINE> if self._archive_exists(): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> util.remove_path(self._root) <NEW_LINE> <DEDENT> except EnvironmentError as _error: <NEW_LINE> <INDENT> raise Error("unable to remove archive root path '%s' [%s]" % (self._root, _error))
Completely remove the archive, both the products as well as the product catalogue. Using the archive after calling this function results in undefined behavior. The prepare() function can be used to bring the archive back into a useable state.
625941b591f36d47f21ac2f1
def test_format_child(self, mock_nested_config): <NEW_LINE> <INDENT> assert mock_nested_config.format("{CHILD.BAR}") == "bar" <NEW_LINE> assert mock_nested_config.format("{CHILD.GRANDCHILD.XOO}") == "xoo" <NEW_LINE> mock_nested_config.CHILD.NESTED = "{CHILD.GRANDCHILD.XOO}" <NEW_LINE> mock_nested_config.CHILD.GRANDCHILD.NESTED_2ND_LEVEL = "{CHILD.NESTED}" <NEW_LINE> assert mock_nested_config.format("{CHILD.NESTED}") == "xoo" <NEW_LINE> assert mock_nested_config.format("{CHILD.GRANDCHILD.NESTED_2ND_LEVEL}") == "xoo"
Child config should be usable by format
625941b50a50d4780f666c8a
def wikidata_items(self): <NEW_LINE> <INDENT> return self.wf.resource("[email protected]", dir=corpora.wikidir(), format="records/frame")
Resource for wikidata items. This is a set of record files where each WikiData item is represented as a frame: <qid>: { =<qid> :/w/item name: "..." description: "..." alias: { name: "..." lang: /lang/<lang> sources: ... } ... /w/wikipedia: { /lang/<lang>: <wid> ... } ... properties } <qid>: Wikidata item id (Q<item number>, e.g. Q35) <pid>: Wikidata property id (P<property number>, e.g. P31) <wid>: Wikipedia page id (/wp/<lang>/<pageid>, /wp/en/76972)
625941b53eb6a72ae02ec2d5
def __write_in_console(self, code, message): <NEW_LINE> <INDENT> self.__console.SetForegroundColour(wx.WHITE) <NEW_LINE> if code == Message.CONSOLE_LOG: <NEW_LINE> <INDENT> self.__console.AppendText(message) <NEW_LINE> <DEDENT> elif code == Message.CONSOLE_LOG_ERR or code == Message.ERROR_MESSAGE: <NEW_LINE> <INDENT> self.__console.SetForegroundColour(wx.RED) <NEW_LINE> self.__console.AppendText(message) <NEW_LINE> <DEDENT> self.__console.SetForegroundColour(wx.WHITE)
write a message in the console :param code: Message.CONSOLE_LOG =>white|Message.CONSOLE_LOG_ERR => red :param message: :return:
625941b5cc40096d6159574f
def _px(self, m): <NEW_LINE> <INDENT> mm = m * 1000 * self.scalefactor <NEW_LINE> px = mm * self.MM2PX <NEW_LINE> return px
Go from meters to pixels
625941b5c432627299f04a40
def create_weights_conditional(n_inputs, n_outputs, n_hiddens, n_comps): <NEW_LINE> <INDENT> Wx = tf.Variable(rng.randn(n_inputs, n_hiddens[0]) / np.sqrt(n_inputs + 1), dtype=dtype, name='Wx') <NEW_LINE> return (Wx,) + create_weights(n_outputs, n_hiddens, n_comps)
Creates all learnable weight matrices and bias vectors for a conditional made. :param n_inputs: the number of (conditional) inputs :param n_outputs: the number of outputs :param n_hiddens: a list with the number of hidden units :param n_comps: number of gaussian components :return: weights and biases, as tensorflow variables
625941b5dc8b845886cb5330
def reprocess(product_id, valid): <NEW_LINE> <INDENT> req = requests.get( f"https://mesonet.agron.iastate.edu/api/1/nwstext/{product_id}" ) <NEW_LINE> tmpfn = f"/tmp/{product_id}.txt" <NEW_LINE> with open(tmpfn, "wb") as fh: <NEW_LINE> <INDENT> fh.write(req.content) <NEW_LINE> <DEDENT> cmd = f"python ~/projects/pyWWA/util/make_text_noaaportish.py {tmpfn}" <NEW_LINE> subprocess.call(cmd, shell=True) <NEW_LINE> cmd = ( f"cat {tmpfn} | python ~/projects/pyWWA/parsers/spc_parser.py -x " f"-u {valid.strftime('%Y-%m-%dT00:00Z')}" ) <NEW_LINE> subprocess.call(cmd, shell=True)
Send this product_id back through the meatgrinder.
625941b5cdde0d52a9e52e2a
def _wigner_fourier(psi, xvec, g=np.sqrt(2)): <NEW_LINE> <INDENT> if psi.type == 'bra': <NEW_LINE> <INDENT> psi = psi.dag() <NEW_LINE> <DEDENT> if psi.type == 'ket': <NEW_LINE> <INDENT> return _psi_wigner_fft(psi.full(), xvec, g) <NEW_LINE> <DEDENT> elif psi.type == 'oper': <NEW_LINE> <INDENT> eig_vals, eig_vecs = la.eigh(psi.full()) <NEW_LINE> W = 0 <NEW_LINE> for ii in range(psi.shape[0]): <NEW_LINE> <INDENT> W1, yvec = _psi_wigner_fft( np.reshape(eig_vecs[:, ii], (psi.shape[0], 1)), xvec, g) <NEW_LINE> W += eig_vals[ii] * W1 <NEW_LINE> <DEDENT> return W, yvec
Evaluate the Wigner function via the Fourier transform.
625941b5f548e778e58cd377
def get_method_kind(method, add_args=0): <NEW_LINE> <INDENT> if method is None: <NEW_LINE> <INDENT> return None <NEW_LINE> <DEDENT> fsig=FunctionSignature.from_function(method) <NEW_LINE> if len(fsig.arg_names)>=1+add_args and fsig.arg_names[0]=="name": <NEW_LINE> <INDENT> return "named" <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return "simple"
Determine whether the method takes name as its first argument. `add_args` specifies the number of additional required arguments. Return ``"named"`` if the method has at least ``add_args+1`` arguments and the first one is called ``"name"``. Otherwise, return ``"simple"``.
625941b57047854f462a1209
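A minimal standalone sketch of the same rule, using the standard inspect module instead of the FunctionSignature helper assumed by the record above (the function names below are illustrative only):

import inspect

def method_kind(method, add_args=0):
    # Return "named" when the method's first parameter is literally called
    # "name" and there are at least add_args + 1 parameters; else "simple".
    if method is None:
        return None
    params = list(inspect.signature(method).parameters)
    if len(params) >= 1 + add_args and params[0] == "name":
        return "named"
    return "simple"

def get_named(name, channel):
    return channel

def get_simple(channel):
    return channel

assert method_kind(get_named, add_args=1) == "named"
assert method_kind(get_simple, add_args=1) == "simple"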
def get(self, key): <NEW_LINE> <INDENT> return super().get(key, set())
Get a set in the dictionary, defaulting to the empty set if not found
625941b5d18da76e235322cd
def __init__(self, game): <NEW_LINE> <INDENT> self.game = game <NEW_LINE> self.states = { PROP_VIEW: STATE_MAPVIEW, PROP_MAP: 0, PROP_GRIDLINES: False, PROP_PLAYERS: True, PROP_UPDATE_MAIN: True, PROP_GM_VIEW: STATE_NORMAL, PROP_SHOW_TOKEN: True, PROP_ZOOM: 1, PROP_TRANS_X: 0, PROP_TRANS_Y: 0, } <NEW_LINE> self.main_view = View() <NEW_LINE> self.gm_view = View() <NEW_LINE> cv.namedWindow("main", cv.WINDOW_NORMAL) <NEW_LINE> cv.namedWindow("gm", cv.WINDOW_NORMAL)
save a reference to the game and set parameters :param game: The game of which the current map is going to be displayed
625941b5d10714528d5ffada
def __number_of_all_unique_kmers(s): <NEW_LINE> <INDENT> n = len(s) <NEW_LINE> uniques = set() <NEW_LINE> for k in range(1, n+1): <NEW_LINE> <INDENT> uniques.update(__list_unique_kmers(s, k)) <NEW_LINE> <DEDENT> return len(uniques)
Finds the number of unique k-mers in s for all k in [1, len(s)].
625941b52eb69b55b151c6a6
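A self-contained sketch of the same counting idea (illustrative only, not a record from the dataset): every substring of s for every k is collected into one set, so for s = "ATAT" the unique k-mers are A, T, AT, TA, ATA, TAT and ATAT, i.e. 7 in total.

def number_of_all_unique_kmers(s):
    # Collect every substring of length 1..len(s) into a single set.
    uniques = set()
    for k in range(1, len(s) + 1):
        for i in range(len(s) - k + 1):
            uniques.add(s[i:i + k])
    return len(uniques)

assert number_of_all_unique_kmers("ATAT") == 7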
def build_parse_tree(sentence, bp_table, start, end, start_symbol): <NEW_LINE> <INDENT> if (start,end,start_symbol) in bp_table: <NEW_LINE> <INDENT> left_index = bp_table[(start,end,start_symbol)][0] <NEW_LINE> left = build_parse_tree(sentence, bp_table, left_index[0], left_index[1], left_index[2]) <NEW_LINE> right_index = bp_table[(start,end,start_symbol)][1] <NEW_LINE> right = build_parse_tree(sentence, bp_table, right_index[0], right_index[1], right_index[2]) <NEW_LINE> return [start_symbol, left, right] <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return [start_symbol, sentence[start]]
given the target sentence, build a parse tree for it based on the bp_table.
625941b54f88993c3716be70
def BVA_categorical_plot(data, tar, cat,sig_level = 0.05): <NEW_LINE> <INDENT> data = data[[cat,tar]][:] <NEW_LINE> table = pd.crosstab(data[tar],data[cat],) <NEW_LINE> f_obs = np.array([table.iloc[0][:].values,table.iloc[1][:].values]) <NEW_LINE> from scipy.stats import chi2_contingency <NEW_LINE> chi, p, dof, expected = chi2_contingency(f_obs) <NEW_LINE> if p<sig_level: <NEW_LINE> <INDENT> sig = True <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> sig = False <NEW_LINE> <DEDENT> plt.figure(figsize=(20,4)) <NEW_LINE> fig, axes = plt.subplots(1,2) <NEW_LINE> ax1 = data.groupby(cat)[tar].value_counts(normalize=False).unstack().round(4) <NEW_LINE> sns.countplot(x=cat, hue=tar, data=data, ax = axes[0]) <NEW_LINE> axes[0].set_xticklabels(data[cat].cat.categories,rotation=90) <NEW_LINE> axes[0].title.set_text("p-value = {}\n Significance level: {}\n difference significant? = {}\n\n{}".format(round(p,8), sig_level,sig,str(ax1))) <NEW_LINE> ax1 = data.groupby(cat)[tar].value_counts(normalize=True).unstack().round(4) <NEW_LINE> ax1.plot(kind='bar', stacked='True',title=str(ax1),ax= axes[1], figsize=(15,5)) <NEW_LINE> plt.xticks(rotation=90) <NEW_LINE> plt.ylabel("Percentage")
Takes data and two categorical variables, calculates the chi2 significance between the two variables and prints the result with a countplot & CrossTab
625941b597e22403b379cd94
def is_no_record_found_message_displayed_in_customer_offer_preview_grid(self): <NEW_LINE> <INDENT> return self.is_element_present(self.customer_offer_preview_grid_no_record_found_message_locator)
Checks whether the 'no record found' message is displayed in the customer offer preview grid. :return:
625941b5be7bc26dc91cd402
def _check_error(self, ws: WebSocketApp, error: Exception) -> None: <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> with self._reader_guard: <NEW_LINE> <INDENT> if self._reader_status == ReaderStatus.DONE and isinstance(error, OSError) and error.errno == 9: <NEW_LINE> <INDENT> return <NEW_LINE> <DEDENT> <DEDENT> self._set_error(error) <NEW_LINE> self.on_error(ws, error) <NEW_LINE> <DEDENT> except Exception as err: <NEW_LINE> <INDENT> self._set_error(err)
Just log the error and propagate it to *self.on_error*. :param ws: WebSocketApp :param error: Exception
625941b510dbd63aa1bd29ac
def bst_insert(self, root, key): <NEW_LINE> <INDENT> left = False <NEW_LINE> if not root: <NEW_LINE> <INDENT> return TreeNode(key) <NEW_LINE> <DEDENT> if key == root.key: <NEW_LINE> <INDENT> return None <NEW_LINE> <DEDENT> elif key < root.key: <NEW_LINE> <INDENT> next_node = root.left <NEW_LINE> left = True <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> next_node = root.right <NEW_LINE> left = False <NEW_LINE> <DEDENT> if next_node is not None: <NEW_LINE> <INDENT> self.bst_insert(next_node, key) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> if left: <NEW_LINE> <INDENT> root.left = TreeNode(key) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> root.right = TreeNode(key) <NEW_LINE> <DEDENT> <DEDENT> return root
A recursive function to insert a new TreeNode into the tree without rotation balancing. :param root: The root node of the tree or None. :param key: The new key to insert. :return: The updated root node or a new TreeNode(key)
625941b5596a8972360898c6
def nodes(self): <NEW_LINE> <INDENT> return self.graph.nodes()
An iterator of all nodes in the graph. :return: The iterator. :rtype: iterator
625941b5be383301e01b5289
def test_insert_and_replace_one(self): <NEW_LINE> <INDENT> member = Member() <NEW_LINE> member2 = Member() <NEW_LINE> member2.last_name = "Test" <NEW_LINE> member2.first_name = "Test" <NEW_LINE> insert_result = self.DB.insert_one(db_constants.MEMBERS, member) <NEW_LINE> self.DB.replace_one(db_constants.MEMBERS, {db_constants.ENTRY_ID: insert_result.inserted_id}, member2) <NEW_LINE> result = self.DB.find_one(db_constants.MEMBERS, {db_constants.ENTRY_ID: insert_result.inserted_id}) <NEW_LINE> self.convertUnicodeToAscii(result) <NEW_LINE> self.assertTrue(db_constants.ENTRY_ID in result.__dict__) <NEW_LINE> result.__dict__.pop(db_constants.ENTRY_ID) <NEW_LINE> self.assertNotEqual(member.__dict__, result.__dict__) <NEW_LINE> self.assertEqual(member2.__dict__, result.__dict__)
Verifies a custom class can be properly replaced.
625941b5287bf620b61d386c
def get_args(tp, evaluate=None): <NEW_LINE> <INDENT> if NEW_TYPING: <NEW_LINE> <INDENT> if evaluate is not None and not evaluate: <NEW_LINE> <INDENT> raise ValueError('evaluate can only be True in Python >= 3.7') <NEW_LINE> <DEDENT> if isinstance(tp, _GenericAlias): <NEW_LINE> <INDENT> return tp.__args__ <NEW_LINE> <DEDENT> return () <NEW_LINE> <DEDENT> if is_generic_type(tp) or is_union_type(tp) or is_tuple_type(tp): <NEW_LINE> <INDENT> tree = tp._subs_tree() <NEW_LINE> if isinstance(tree, tuple) and len(tree) > 1: <NEW_LINE> <INDENT> if not evaluate: <NEW_LINE> <INDENT> return tree[1:] <NEW_LINE> <DEDENT> return _eval_args(tree[1:]) <NEW_LINE> <DEDENT> <DEDENT> return ()
Get type arguments with all substitutions performed. For unions, basic simplifications used by Union constructor are performed. On versions prior to 3.7 if `evaluate` is False (default), report result as nested tuple, this matches the internal representation of types. If `evaluate` is True (or if Python version is 3.7 or greater), then all type parameters are applied (this could be time and memory expensive). Examples:: get_args(int) == () get_args(Union[int, Union[T, int], str][int]) == (int, str) get_args(Union[int, Tuple[T, int]][str]) == (int, (Tuple, str, int)) get_args(Union[int, Tuple[T, int]][str], evaluate=True) == (int, Tuple[str, int]) get_args(Dict[int, Tuple[T, T]][Optional[int]], evaluate=True) == (int, Tuple[Optional[int], Optional[int]]) get_args(Callable[[], T][int], evaluate=True) == ([], int,)
625941b5e5267d203edcda9d
def _name_from_project_path(path, project, template): <NEW_LINE> <INDENT> if isinstance(template, str): <NEW_LINE> <INDENT> template = re.compile(template) <NEW_LINE> <DEDENT> match = template.match(path) <NEW_LINE> if not match: <NEW_LINE> <INDENT> raise ValueError( 'path "%s" did not match expected pattern "%s"' % (path, template.pattern) ) <NEW_LINE> <DEDENT> if project is not None: <NEW_LINE> <INDENT> found_project = match.group("project") <NEW_LINE> if found_project != project: <NEW_LINE> <INDENT> raise ValueError( "Project from client (%s) should agree with " "project from resource(%s)." % (project, found_project) ) <NEW_LINE> <DEDENT> <DEDENT> return match.group("name")
Validate a URI path and get the leaf object's name. :type path: str :param path: URI path containing the name. :type project: str :param project: (Optional) The project associated with the request. It is included for validation purposes. If passed as None, disables validation. :type template: str :param template: Template regex describing the expected form of the path. The regex must have two named groups, 'project' and 'name'. :rtype: str :returns: Name parsed from ``path``. :raises ValueError: if the ``path`` is ill-formed or if the project from the ``path`` does not agree with the ``project`` passed in.
625941b54428ac0f6e5ba5f6
def __init__(self, collection, page=1, items_per_page=20, item_count=None, wrapper_class=None, url_maker=None, **kwargs): <NEW_LINE> <INDENT> if collection is not None: <NEW_LINE> <INDENT> if wrapper_class is None: <NEW_LINE> <INDENT> self.collection = collection <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> self.collection = wrapper_class(collection) <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> self.collection = [] <NEW_LINE> <DEDENT> self.collection_type = type(collection) <NEW_LINE> if url_maker is not None: <NEW_LINE> <INDENT> self.url_maker = url_maker <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> self.url_maker = self._default_url_maker <NEW_LINE> <DEDENT> self.kwargs = kwargs <NEW_LINE> try: <NEW_LINE> <INDENT> self.page = int(page) <NEW_LINE> <DEDENT> except (ValueError, TypeError): <NEW_LINE> <INDENT> self.page = 1 <NEW_LINE> <DEDENT> if self.page < 1: <NEW_LINE> <INDENT> self.page = 1 <NEW_LINE> <DEDENT> self.items_per_page = items_per_page <NEW_LINE> try: <NEW_LINE> <INDENT> first = (self.page - 1) * items_per_page <NEW_LINE> last = first + items_per_page <NEW_LINE> self.items = list(self.collection[first:last]) <NEW_LINE> <DEDENT> except TypeError: <NEW_LINE> <INDENT> raise TypeError("Your collection of type "+type(self.collection)+ " cannot be handled by paginate.") <NEW_LINE> <DEDENT> if item_count is not None: <NEW_LINE> <INDENT> self.item_count = item_count <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> self.item_count = len(self.collection) <NEW_LINE> <DEDENT> if self.item_count > 0: <NEW_LINE> <INDENT> self.first_page = 1 <NEW_LINE> self.page_count = ((self.item_count - 1) // self.items_per_page) + 1 <NEW_LINE> self.last_page = self.first_page + self.page_count - 1 <NEW_LINE> if self.page > self.last_page: <NEW_LINE> <INDENT> self.page = self.last_page <NEW_LINE> <DEDENT> elif self.page < self.first_page: <NEW_LINE> <INDENT> self.page = self.first_page <NEW_LINE> <DEDENT> self.first_item = (self.page - 1) * items_per_page + 1 <NEW_LINE> self.last_item = min(self.first_item + items_per_page - 1, self.item_count) <NEW_LINE> if self.page > self.first_page: <NEW_LINE> <INDENT> self.previous_page = self.page-1 <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> self.previous_page = None <NEW_LINE> <DEDENT> if self.page < self.last_page: <NEW_LINE> <INDENT> self.next_page = self.page+1 <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> self.next_page = None <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> self.first_page = None <NEW_LINE> self.page_count = 0 <NEW_LINE> self.last_page = None <NEW_LINE> self.first_item = None <NEW_LINE> self.last_item = None <NEW_LINE> self.previous_page = None <NEW_LINE> self.next_page = None <NEW_LINE> self.items = [] <NEW_LINE> <DEDENT> list.__init__(self, self.items)
Create a "Page" instance. Parameters: collection Sequence representing the collection of items to page through. page The requested page number - starts with 1. Default: 1. items_per_page The maximal number of items to be displayed per page. Default: 20. item_count (optional) The total number of items in the collection - if known. If this parameter is not given then the paginator will count the number of elements in the collection every time a "Page" is created. Giving this parameter will speed up things. In a busy real-life application you may want to cache the number of items. url_maker (optional) Callback to generate the URL of other pages, given its numbers. Must accept one int parameter and return a URI string.
625941b59c8ee82313fbb573
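A short worked example of the page arithmetic used in the constructor above (numbers chosen purely for illustration): with 45 items, 20 items per page and page 2 requested, the slice covers items 21 to 40 out of 3 pages.

item_count, items_per_page, page = 45, 20, 2
page_count = ((item_count - 1) // items_per_page) + 1          # (44 // 20) + 1 = 3
first_item = (page - 1) * items_per_page + 1                   # 21
last_item = min(first_item + items_per_page - 1, item_count)   # 40
print(page_count, first_item, last_item)                       # 3 21 40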
def set_unauthorized_token(self): <NEW_LINE> <INDENT> self.session['_utoken'] = self.unauthorized_token() <NEW_LINE> self.session.save() <NEW_LINE> return self.session['_utoken']
Save unauthorized token in session.
625941b5baa26c4b54cb0f20
@login_required <NEW_LINE> @group_required(SEC_GROUP_NAMES['index']) <NEW_LINE> def indexing_select_type(request, step=None, template='mdtui/indexing.html'): <NEW_LINE> <INDENT> context = {'step': step,} <NEW_LINE> docrule = None <NEW_LINE> active_docrule = None <NEW_LINE> warnings = [] <NEW_LINE> docrules_list = make_document_type_select(user=request.user) <NEW_LINE> cleanup_search_session(request) <NEW_LINE> cleanup_mdts(request) <NEW_LINE> log.debug('indexing_select_type view called with docrule: %s' % docrule) <NEW_LINE> if request.POST: <NEW_LINE> <INDENT> for item, value in request.POST.iteritems(): <NEW_LINE> <INDENT> if not item == u'csrfmiddlewaretoken': <NEW_LINE> <INDENT> docrule = int(item) <NEW_LINE> <DEDENT> <DEDENT> request.session['indexing_docrule_id'] = docrule <NEW_LINE> mdts = get_mdts_for_docrule(docrule) <NEW_LINE> if mdts: <NEW_LINE> <INDENT> request.session['mdts'] = mdts <NEW_LINE> return HttpResponseRedirect(reverse('mdtui-index-details')) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> warnings.append(MDTUI_ERROR_STRINGS['NO_MDTS']) <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> docrule = request.session['indexing_docrule_id'] <NEW_LINE> <DEDENT> except KeyError: <NEW_LINE> <INDENT> pass <NEW_LINE> <DEDENT> if docrule: <NEW_LINE> <INDENT> active_docrule = docrule <NEW_LINE> <DEDENT> <DEDENT> context.update({ 'active_docrule': active_docrule, 'docrules_list': docrules_list, 'warnings': warnings, }) <NEW_LINE> return render_to_response(template, context, context_instance=RequestContext(request))
Indexing: Step 1 : Select Document Type
625941b599cbb53fe67929e3
def test_with_file_management(self): <NEW_LINE> <INDENT> device_key = "some_key" <NEW_LINE> device_password = "some_password" <NEW_LINE> actuator_references = [] <NEW_LINE> device = Device(device_key, device_password, actuator_references) <NEW_LINE> file_directory = "files" <NEW_LINE> wolk_device = WolkConnect(device).with_file_management( 256, 1024, file_directory ) <NEW_LINE> self.assertTrue(os.path.exists(file_directory)) <NEW_LINE> os.rmdir(file_directory)
Test enabling file management module.
625941b5be383301e01b528a
def reweight_crush_items(self, **kwargs) -> bool: <NEW_LINE> <INDENT> cmd = "ceph osd df tree" <NEW_LINE> out = self.run_ceph_command(cmd=cmd) <NEW_LINE> osd_info_init = [entry for entry in out["nodes"] if entry["type"] == "osd"] <NEW_LINE> affected_osds = [] <NEW_LINE> if kwargs.get("name"): <NEW_LINE> <INDENT> name = kwargs["name"] <NEW_LINE> weight = kwargs["weight"] <NEW_LINE> cmd = f"ceph osd crush reweight {name} {weight}" <NEW_LINE> out = self.run_ceph_command(cmd=cmd) <NEW_LINE> affected_osds.append(name) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> cmd = r"ceph osd reweight-by-utilization" <NEW_LINE> out = self.run_ceph_command(cmd=cmd) <NEW_LINE> if int(out["max_change_osds"]) >= 1: <NEW_LINE> <INDENT> affected_osds = [entry["osd"] for entry in out["reweights"]] <NEW_LINE> log.info( f"re-weights have been triggered on these OSD's, Deatils\n" f"PG's affected : {out['utilization']['moved_pgs']}\n" f"OSd's affected: {[entry for entry in out['reweights']]}" ) <NEW_LINE> time.sleep(5) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> log.info( "No re-weights based on utilization were triggered. PG distribution is optimal" ) <NEW_LINE> return True <NEW_LINE> <DEDENT> <DEDENT> if kwargs.get("verify_reweight"): <NEW_LINE> <INDENT> if not self.verify_reweight( affected_osds=affected_osds, osd_info=osd_info_init ): <NEW_LINE> <INDENT> log.error("OSD utilization was not reduced upon re-weight") <NEW_LINE> return False <NEW_LINE> <DEDENT> <DEDENT> log.info("Completed the re-weight of OSD's") <NEW_LINE> return True
Performs Re-weight of various CRUSH items, based on key-value pairs sent Args: **kwargs: Arguments for the commands Returns: True -> pass, False -> fail
625941b5fbf16365ca6f5fb7
def format_wf_steps(wf, gi): <NEW_LINE> <INDENT> body = '' <NEW_LINE> steps = wf['steps'] <NEW_LINE> for s in range(len(steps)): <NEW_LINE> <INDENT> wf_step = steps[str(s)] <NEW_LINE> wf_param_values = {} <NEW_LINE> if wf_step['tool_state'] and wf_step['input_connections']: <NEW_LINE> <INDENT> wf_param_values = get_wf_param_values(wf_step['tool_state'], get_wf_inputs(wf_step['input_connections'])) <NEW_LINE> <DEDENT> if not wf_param_values: <NEW_LINE> <INDENT> continue <NEW_LINE> <DEDENT> try: <NEW_LINE> <INDENT> tool_desc = gi.tools.show_tool(wf_step['tool_id'], io_details=True) <NEW_LINE> <DEDENT> except Exception: <NEW_LINE> <INDENT> tool_desc = {'inputs': []} <NEW_LINE> <DEDENT> paramlist = '' <NEW_LINE> for inp in tool_desc["inputs"]: <NEW_LINE> <INDENT> tool_inp = ToolInput(inp, wf_param_values, steps, 1, should_be_there=True) <NEW_LINE> paramlist += tool_inp.get_formatted_desc() <NEW_LINE> <DEDENT> body += templates.render(HANDS_ON_TOOL_BOX_TEMPLATE, **{ "tool_name": wf_step['name'], "paramlist": paramlist}) <NEW_LINE> <DEDENT> return body
Get a string with the hands-on boxes describing the different steps of the workflow.
625941b5566aa707497f4377
def getPara(vector1, vector2): <NEW_LINE> <INDENT> if isinstance(vector1, math3d.VectorN) and isinstance(vector2, math3d.VectorN): <NEW_LINE> <INDENT> v = vector1 <NEW_LINE> w = vector2 <NEW_LINE> newVector = (v.dot(w) / w.dot(w)) * w <NEW_LINE> return newVector <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> raise ValueError("getPara can only use two vectors of the same dimensions.")
:param vector1: The vector being projected. :param vector2: The vector to project onto. :return: a new vector parallel to vector2 whose length is the parallel component of vector1, i.e. the projection of vector1 onto vector2
625941b538b623060ff0abeb
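A numeric check of the projection formula used in the record above, written with plain tuples instead of the math3d.VectorN type it assumes (illustrative only): proj_w(v) = (v . w / w . w) * w.

def dot(a, b):
    return sum(x * y for x, y in zip(a, b))

def get_para(v, w):
    # Component of v parallel to w, returned as a scalar multiple of w.
    scale = dot(v, w) / dot(w, w)
    return tuple(scale * x for x in w)

assert get_para((3, 4), (1, 0)) == (3.0, 0.0)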
def calc3(num): <NEW_LINE> <INDENT> return -num
Negate num and return the result
625941b5293b9510aa2c3096
def build_features(df, cols_of_tfidf, n_components, minimal_community_size): <NEW_LINE> <INDENT> print('--Computing Tf-Idf features') <NEW_LINE> t0 = time.time() <NEW_LINE> tfIdfer = TfIdfer() <NEW_LINE> tf_idf_features_dict = tfIdfer.get_features(df, cols_of_tfidf) <NEW_LINE> print("--Tfidf total running time is: {} ".format(time.time() - t0)) <NEW_LINE> print("--Tf-idf feature dimension reduction") <NEW_LINE> t0 = time.time() <NEW_LINE> tfifd_features_sprs_matrix = sparse.hstack(list(tf_idf_features_dict.values())) <NEW_LINE> del tf_idf_features_dict <NEW_LINE> lsa = make_pipeline(TruncatedSVD(n_components), Normalizer(copy=False)) <NEW_LINE> tfifd_features_matrix = lsa.fit_transform(tfifd_features_sprs_matrix) <NEW_LINE> print("--Dimension reduction total running time is: {} ".format(time.time() - t0)) <NEW_LINE> print('Getting graph features') <NEW_LINE> t0 = time.time() <NEW_LINE> graph_geatures_builder = GraphFeaturesBuilder() <NEW_LINE> graph_features_dict = graph_geatures_builder.get_features(df, minimal_community_size) <NEW_LINE> print("Graph features total running time is: {} ".format(time.time() - t0)) <NEW_LINE> print("--Graph feature dimension reduction") <NEW_LINE> t0 = time.time() <NEW_LINE> graph_features_sprs_matrix = sparse.hstack(list(graph_features_dict.values())) <NEW_LINE> del graph_features_dict <NEW_LINE> lsa = make_pipeline(TruncatedSVD(n_components), Normalizer(copy=False)) <NEW_LINE> graph_features_matrix = lsa.fit_transform(graph_features_sprs_matrix) <NEW_LINE> print("--Dimension reduction total running time is: {} ".format(time.time() - t0)) <NEW_LINE> features_matrix = np.hstack([tfifd_features_matrix,graph_features_matrix]) <NEW_LINE> return features_matrix
Build all the features of the patents. :param df: pandas DataFrame containing the patents data :param cols_of_tfidf: list of column names for which tf-idf vectors are computed :param n_components: number of components to keep after dimension reduction of the tf-idf and graph matrices :param minimal_community_size: minimal community size used when building the graph features :return: numpy array of shape (n_patents, n_features)
625941b53539df3088e2e147
def train_model(dataset, paths, device): <NEW_LINE> <INDENT> iterator = data.get_dataset_iterator("train", dataset, paths["data"]) <NEW_LINE> next_element, train_init_op, valid_init_op = iterator <NEW_LINE> input_images, ground_truths = next_element[:2] <NEW_LINE> input_plhd = tf.placeholder_with_default(input_images, (None, None, None, 3), name="input") <NEW_LINE> msi_net = model.MSINET() <NEW_LINE> predicted_maps = msi_net.forward(input_plhd) <NEW_LINE> optimizer, loss = msi_net.train(ground_truths, predicted_maps, config.PARAMS["learning_rate"]) <NEW_LINE> n_train_data = getattr(data, dataset.upper()).n_train <NEW_LINE> n_valid_data = getattr(data, dataset.upper()).n_valid <NEW_LINE> n_train_batches = int(np.ceil(n_train_data / config.PARAMS["batch_size"])) <NEW_LINE> n_valid_batches = int(np.ceil(n_valid_data / config.PARAMS["batch_size"])) <NEW_LINE> history = utils.History(n_train_batches, n_valid_batches, dataset, paths["history"], device) <NEW_LINE> progbar = utils.Progbar(n_train_data, n_train_batches, config.PARAMS["batch_size"], config.PARAMS["n_epochs"], history.prior_epochs) <NEW_LINE> with tf.Session() as sess: <NEW_LINE> <INDENT> sess.run(tf.global_variables_initializer()) <NEW_LINE> saver = msi_net.restore(sess, dataset, paths, device) <NEW_LINE> print(">> Start training on %s..." % dataset.upper()) <NEW_LINE> for epoch in range(config.PARAMS["n_epochs"]): <NEW_LINE> <INDENT> sess.run(train_init_op) <NEW_LINE> for batch in range(n_train_batches): <NEW_LINE> <INDENT> _, error = sess.run([optimizer, loss]) <NEW_LINE> history.update_train_step(error) <NEW_LINE> progbar.update_train_step(batch) <NEW_LINE> <DEDENT> sess.run(valid_init_op) <NEW_LINE> for batch in range(n_valid_batches): <NEW_LINE> <INDENT> error = sess.run(loss) <NEW_LINE> history.update_valid_step(error) <NEW_LINE> progbar.update_valid_step() <NEW_LINE> <DEDENT> msi_net.save(saver, sess, dataset, paths["latest"], device) <NEW_LINE> history.save_history() <NEW_LINE> progbar.write_summary(history.get_mean_train_error(), history.get_mean_valid_error()) <NEW_LINE> if history.valid_history[-1] == min(history.valid_history): <NEW_LINE> <INDENT> msi_net.save(saver, sess, dataset, paths["best"], device) <NEW_LINE> msi_net.optimize(sess, dataset, paths["best"], device) <NEW_LINE> print("\tBest model!", flush=True)
The main function for executing network training. It loads the specified dataset iterator, saliency model, and helper classes. Training is then performed in a new session by iterating over all batches for a number of epochs. After validation on an independent set, the model is saved and the training history is updated. Args: dataset (str): Denotes the dataset to be used during training. paths (dict, str): A dictionary with all path elements. device (str): Represents either "cpu" or "gpu".
625941b5462c4b4f79d1d4cc
def parse_function(lexer, module): <NEW_LINE> <INDENT> func = parse_function_definition(lexer, module) <NEW_LINE> while True: <NEW_LINE> <INDENT> token, _ = lexer.get_next_token(peek=True, accept_eol=True) <NEW_LINE> if token == '': <NEW_LINE> <INDENT> lexer.done_with_line() <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> break <NEW_LINE> <DEDENT> <DEDENT> lexer.get_next_token('{') <NEW_LINE> lexer.done_with_line() <NEW_LINE> while True: <NEW_LINE> <INDENT> token, tag = lexer.get_next_token(peek=True, accept_eol=True) <NEW_LINE> if token == '': <NEW_LINE> <INDENT> lexer.done_with_line() <NEW_LINE> <DEDENT> elif token == '}': <NEW_LINE> <INDENT> lexer.get_next_token() <NEW_LINE> lexer.done_with_line() <NEW_LINE> return func <NEW_LINE> <DEDENT> elif tag == 'LABEL': <NEW_LINE> <INDENT> parse_basic_block(lexer, module, func) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> raise ParseError('Expected a label or }')
Parse a pretty-printed function.
625941b5adb09d7d5db6c590
def list_connections( self, resource_group_name, virtual_network_gateway_name, **kwargs ): <NEW_LINE> <INDENT> cls = kwargs.pop('cls', None) <NEW_LINE> error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } <NEW_LINE> error_map.update(kwargs.pop('error_map', {})) <NEW_LINE> api_version = "2018-01-01" <NEW_LINE> accept = "application/json, text/json" <NEW_LINE> def prepare_request(next_link=None): <NEW_LINE> <INDENT> header_parameters = {} <NEW_LINE> header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') <NEW_LINE> if not next_link: <NEW_LINE> <INDENT> url = self.list_connections.metadata['url'] <NEW_LINE> path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } <NEW_LINE> url = self._client.format_url(url, **path_format_arguments) <NEW_LINE> query_parameters = {} <NEW_LINE> query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') <NEW_LINE> request = self._client.get(url, query_parameters, header_parameters) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> url = next_link <NEW_LINE> query_parameters = {} <NEW_LINE> request = self._client.get(url, query_parameters, header_parameters) <NEW_LINE> <DEDENT> return request <NEW_LINE> <DEDENT> def extract_data(pipeline_response): <NEW_LINE> <INDENT> deserialized = self._deserialize('VirtualNetworkGatewayListConnectionsResult', pipeline_response) <NEW_LINE> list_of_elem = deserialized.value <NEW_LINE> if cls: <NEW_LINE> <INDENT> list_of_elem = cls(list_of_elem) <NEW_LINE> <DEDENT> return deserialized.next_link or None, iter(list_of_elem) <NEW_LINE> <DEDENT> def get_next(next_link=None): <NEW_LINE> <INDENT> request = prepare_request(next_link) <NEW_LINE> pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) <NEW_LINE> response = pipeline_response.http_response <NEW_LINE> if response.status_code not in [200]: <NEW_LINE> <INDENT> map_error(status_code=response.status_code, response=response, error_map=error_map) <NEW_LINE> raise HttpResponseError(response=response, error_format=ARMErrorFormat) <NEW_LINE> <DEDENT> return pipeline_response <NEW_LINE> <DEDENT> return ItemPaged( get_next, extract_data )
Gets all the connections in a virtual network gateway. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param virtual_network_gateway_name: The name of the virtual network gateway. :type virtual_network_gateway_name: str :keyword callable cls: A custom type or function that will be passed the direct response :return: An iterator like instance of either VirtualNetworkGatewayListConnectionsResult or the result of cls(response) :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2018_01_01.models.VirtualNetworkGatewayListConnectionsResult] :raises: ~azure.core.exceptions.HttpResponseError
625941b501c39578d7e74c40
def testTokenList(self): <NEW_LINE> <INDENT> model = graylog.models.token_list.TokenList()
Test TokenList
625941b566656f66f7cbbfa6
def __init__(self, base_directory): <NEW_LINE> <INDENT> self.base_directory = base_directory <NEW_LINE> self.index_path = os.path.join(self.base_directory, 'index') <NEW_LINE> self.docs_path = os.path.join(self.base_directory, 'documents') <NEW_LINE> self.stats_path = os.path.join(self.base_directory, 'stats.json') <NEW_LINE> self.setup()
Sets up the object & the data directory. Requires a ``base_directory`` parameter, which specifies the parent directory the index/document/stats data will be kept in. Example:: ms = microsearch.Microsearch('/var/my_index')
625941b5796e427e537b03be
def setup(hass, config): <NEW_LINE> <INDENT> api_key = config[DOMAIN][CONF_API_KEY] <NEW_LINE> global BLOOMSKY <NEW_LINE> try: <NEW_LINE> <INDENT> BLOOMSKY = BloomSky(api_key, hass.config.units.is_metric) <NEW_LINE> <DEDENT> except RuntimeError: <NEW_LINE> <INDENT> return False <NEW_LINE> <DEDENT> for component in BLOOMSKY_TYPE: <NEW_LINE> <INDENT> discovery.load_platform(hass, component, DOMAIN, {}, config) <NEW_LINE> <DEDENT> return True
Set up the BloomSky component.
625941b55166f23b2e1a4f55
def _input_fn(filenames, feature_specs, label_key, batch_size=200): <NEW_LINE> <INDENT> dataset = tf.data.experimental.make_batched_features_dataset( file_pattern=filenames, batch_size=batch_size, features=feature_specs, label_key=label_key, reader=_gzip_reader_fn) <NEW_LINE> return dataset
Generates features and labels for training or evaluation. Args: filenames: [str] list of data files to read from. feature_specs: dict of feature specs used to parse the examples. label_key: str name of the label feature. batch_size: int First dimension size of the Tensors returned by input_fn Returns: A tf.data.Dataset that yields (features, labels) batches, where features is a dictionary of Tensors and labels is a single Tensor.
625941b5d53ae8145f87a073
@permission_required("core.manage_shop", login_url="/login/") <NEW_LINE> def save_payment_method_criteria(request, payment_method_id): <NEW_LINE> <INDENT> payment_method = lfs_get_object_or_404(PaymentMethod, pk=payment_method_id) <NEW_LINE> criteria_utils.save_criteria(request, payment_method) <NEW_LINE> html = [["#criteria", payment_method_criteria(request, payment_method_id)]] <NEW_LINE> result = simplejson.dumps({ "html": html, "message": _(u"Modifications have been changed."), }, cls=LazyEncoder) <NEW_LINE> return HttpResponse(result)
Saves the criteria for the payment method with given id. The criteria are passed via request body.
625941b57047854f462a120a
def print_bday_info(bday): <NEW_LINE> <INDENT> weekday_birth_year = calc_birthday_day_of_week(bday) <NEW_LINE> print('Looks like you were born on a {}'.format(weekday_birth_year)) <NEW_LINE> today = datetime.date.today() <NEW_LINE> bday_this_year = datetime.date(today.year, bday.month, bday.day) <NEW_LINE> weekday_this_year = calc_birthday_day_of_week(bday_this_year) <NEW_LINE> td = bday_this_year - today <NEW_LINE> days_to_bday = td.days <NEW_LINE> if days_to_bday < 0: <NEW_LINE> <INDENT> print('Your birthday this year was on {0}, {1} days ago'.format(weekday_this_year, -days_to_bday)) <NEW_LINE> <DEDENT> elif days_to_bday > 0: <NEW_LINE> <INDENT> print('Your birthday this year is on {0}, in {1} days!'.format(weekday_this_year, days_to_bday)) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> print('Happy birthday!!!')
Print the relevant information for a given birthday
625941b51f037a2d8b945ffb
@mock.patch('CuckooAPI.requests.get') <NEW_LINE> def test_sampledownload_exception(mock_get): <NEW_LINE> <INDENT> mock_get.return_value.status_code = 404 <NEW_LINE> api = CuckooAPI.CuckooAPI() <NEW_LINE> ExceptionThrown = False <NEW_LINE> try: <NEW_LINE> <INDENT> api.sampledownload() <NEW_LINE> <DEDENT> except CuckooAPI.CuckooAPINoHash: <NEW_LINE> <INDENT> ExceptionThrown = True <NEW_LINE> <DEDENT> assert ExceptionThrown is True <NEW_LINE> ExceptionThrown = False <NEW_LINE> try: <NEW_LINE> <INDENT> api.sampledownload('1', 'task', 'README.md') <NEW_LINE> <DEDENT> except CuckooAPI.CuckooAPIFileExists: <NEW_LINE> <INDENT> ExceptionThrown = True <NEW_LINE> <DEDENT> assert ExceptionThrown is True <NEW_LINE> ExceptionThrown = False <NEW_LINE> try: <NEW_LINE> <INDENT> os.remove('test1.bin') <NEW_LINE> <DEDENT> except: <NEW_LINE> <INDENT> pass <NEW_LINE> <DEDENT> try: <NEW_LINE> <INDENT> api.sampledownload(1, 'task', 'test1.bin') <NEW_LINE> <DEDENT> except CuckooAPI.CuckooAPIBadRequest: <NEW_LINE> <INDENT> ExceptionThrown = True <NEW_LINE> <DEDENT> assert ExceptionThrown is True <NEW_LINE> try: <NEW_LINE> <INDENT> os.remove('test1.bin') <NEW_LINE> <DEDENT> except: <NEW_LINE> <INDENT> pass
Test a pretend sample download with exception
625941b5bde94217f3682bf9
def test_network_1_existing_vxlan_nodes_1_requested_vxlan_nodes( self, network_state='tests/bigip_test_vxlan_1_record.json', cloud_state='tests/openshift_1_node.json'): <NEW_LINE> <INDENT> self.read_test_vectors(cloud_state=cloud_state, network_state=network_state) <NEW_LINE> cfg = ctlr.create_network_config(self.cloud_data) <NEW_LINE> apply_network_fdb_config(self.mgr.mgmt_root(), cfg['fdb']) <NEW_LINE> self.assertEqual(self.mgr.mgmt_root(). tm.net.fdb.tunnels.tunnel.load.call_count, 1) <NEW_LINE> self.assertEqual(self.compute_fdb_records(), self.vxlan_tunnel.records)
Test: openshift environment with 1 node.
625941b5a8370b771705269e
def run_cmd(cmd, *args, **kwargs): <NEW_LINE> <INDENT> stdin = kwargs.pop('stdin', None) <NEW_LINE> cwd = kwargs.pop('cwd', None) <NEW_LINE> if kwargs: <NEW_LINE> <INDENT> env = os.environ.copy() <NEW_LINE> env.update(kwargs) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> env = None <NEW_LINE> <DEDENT> p = subprocess.Popen(['git', cmd] + list(args), stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=cwd, env=env) <NEW_LINE> (stdout, stderr) = p.communicate(stdin) <NEW_LINE> return (stdout, stderr, p.returncode)
Run a git command. :param cmd: git command name e.g. 'commit' :param args: the argument list e.g. ['-m', 'foo'] :param stdin: a string to pass via stdin :param cwd: the directory to run from :param kwargs: environment variables to set :returns: tuple with stdout, stderr and exit status
625941b5a17c0f6771cbde51
def read_searchcoil_list(sc_zip_list=''): <NEW_LINE> <INDENT> def df_sc_gen(sc_zip_list): <NEW_LINE> <INDENT> sample_rate = dt.timedelta(microseconds=100000) <NEW_LINE> for file in sc_zip_list: <NEW_LINE> <INDENT> file_start = dt.datetime.strptime(file[-26:-7], '%Y_%m_%d_%H_%M_%S') <NEW_LINE> df_in = pd.DataFrame(columns=['datetime', 'dBx', 'dBy']) <NEW_LINE> with gzip.open(file, mode='rb') as bitstream: <NEW_LINE> <INDENT> in_bits = bitstream.read().hex() <NEW_LINE> samples = [int(in_bits[i:i + 3], 16) for i in range(0, len(in_bits), 3)] <NEW_LINE> samples = [x - 4096 if x > 2047 else x for x in samples] <NEW_LINE> df_in['dBx'] = [samples[x] * (.0049 / 4.43) for x in range(0, len(samples), 2)] <NEW_LINE> df_in['dBy'] = [samples[x] * (.0049 / 4.43) for x in range(1, len(samples), 2)] <NEW_LINE> in_dates = pd.date_range(file_start, periods=len(in_bits) // 3, freq=sample_rate) <NEW_LINE> df_in['datetime'] = pd.Series(in_dates) <NEW_LINE> <DEDENT> yield df_in.astype({'datetime': np.dtype('<M8[ns]'), 'dBx': np.float16, 'dBy': np.float16}) <NEW_LINE> <DEDENT> <DEDENT> return pd.concat(df_sc_gen(sc_zip_list), ignore_index=True)
Read in a searchcoil filelist and return a dataframe Args: sc_zip_list (str, optional): Python list of full file names to read Returns: DataFrame: A pandas dataframe with the following columns: 'datetime', 'dBx', 'dBy'
625941b57b180e01f3dc4604
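A small illustration of the 12-bit sample decoding performed above (the scale factor .0049 / 4.43 is taken from the code, not verified independently): each 3-hex-digit value is read as a 12-bit two's-complement integer before scaling.

raw = int("fff", 16)                            # 4095, one 3-hex-digit sample
signed = raw - 4096 if raw > 2047 else raw      # -1 after two's-complement adjustment
value = signed * (.0049 / 4.43)                 # scaled sample, as in the code above
print(signed, round(value, 6))                  # -1 -0.001106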
def prepend(self, item): <NEW_LINE> <INDENT> if self.is_empty(): <NEW_LINE> <INDENT> node = Node(item) <NEW_LINE> self.head = node <NEW_LINE> self.tail = node <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> node = Node(item) <NEW_LINE> node.next = self.head <NEW_LINE> self.head = node
Insert the given item at the head of this linked list. TODO: Running time: O(???) Why and under what conditions?
625941b5ac7a0e7691ed3ed7
def save_crypto_operations_pkl(self): <NEW_LINE> <INDENT> logger.info("CryptoManager - save_crypto_operations_pkl") <NEW_LINE> if not self.dfop.empty: <NEW_LINE> <INDENT> self.dfop.to_pickle(os.path.join(self.cwd, "pd_raw_data", "crypto_operations.pkl"))
Save operations dataframe in pickle format
625941b5097d151d1a222c59
def amax_files(): <NEW_LINE> <INDENT> return [os.path.join(dp, f) for dp, dn, filenames in os.walk(CACHE_FOLDER) for f in filenames if os.path.splitext(f)[1].lower() == '.am']
Return all annual maximum flow (`*.am`) files in cache folder and sub folders. :return: List of file paths :rtype: list
625941b56aa9bd52df036b9f
def _handle_deferred_gc(self, ref, request): <NEW_LINE> <INDENT> logger.debug('Garbage collecting for request: %s'%request) <NEW_LINE> if not request.finished and request.channel: <NEW_LINE> <INDENT> request.finish() <NEW_LINE> <DEDENT> self._weakrefs.remove(ref)
This function is called when the request_proxy in the baton has been finalized.
625941b5cdde0d52a9e52e2b
def is_authenticated(): <NEW_LINE> <INDENT> return ('user' in session) and session['user']['id'] > 0
Checks if the current user is authenticated.
625941b5099cdd3c635f0a5a
def value(self, node): <NEW_LINE> <INDENT> if node.children: <NEW_LINE> <INDENT> return node.children[0].value() <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return None
The *value* of a :class:`Optional` is the value of its child, if the child did match the recognition. Otherwise the *value* is *None*.
625941b5507cdc57c6306acf
def test__redact_year(): <NEW_LINE> <INDENT> values = pd.Series([1923, 2003, '<foo', '>testing']) <NEW_LINE> redacted = _redact_year(values) <NEW_LINE> expected_redact = [1923, 2003, 'withheld', 'cannotReleaseHIPAA'] <NEW_LINE> assert redacted.to_list() == expected_redact
Tests redaction of birth year based on < and >
625941b576e4537e8c351474
def testHuidigeDatum(self): <NEW_LINE> <INDENT> vandaag = mvhutils.huidige_datum() <NEW_LINE> self.assertEqual(VANDAAG, vandaag)
Today's date is returned
625941b58e05c05ec3eea16e
def top_share_in_best_broker(from_month, to_month, month_to_judge, broker_top_n, min_recommend_num): <NEW_LINE> <INDENT> recommend = get_data(from_month, to_month) <NEW_LINE> recommend = recommend.groupby(['broker', 'month'], as_index=False).agg({'ts_code': split_char.join}) <NEW_LINE> recommend['ret'] = recommend.apply(lambda row: portfolio_return.cal(row.ts_code.split(split_char), row.month + '01', date_utils.last_dt_of_month(row.month)), axis=1) <NEW_LINE> m = date_utils.month_delta(from_month, month_to_judge) <NEW_LINE> rm = from_month <NEW_LINE> ra = [] <NEW_LINE> da = [] <NEW_LINE> assets = Decimal(1.0) <NEW_LINE> while m <= to_month: <NEW_LINE> <INDENT> past = recommend.loc[(recommend['month'] >= rm) & (recommend['month'] < m)] <NEW_LINE> past_agg = past.groupby(['broker'], as_index=False).agg({'ret': cal_ret}) <NEW_LINE> past_agg['rank'] = past_agg['ret'].rank(method='dense', ascending=False) <NEW_LINE> broker_list = list(past_agg.loc[past_agg['rank'] <= broker_top_n]['broker']) <NEW_LINE> current = recommend.loc[(recommend['month'] == m) & (recommend['broker'].isin(broker_list))] <NEW_LINE> broker_share_list = split_char.join(current['ts_code']).split(split_char) <NEW_LINE> current_return = share_list_return(m, broker_share_list, min_recommend_num) <NEW_LINE> assets = assets * (1 + current_return) <NEW_LINE> m = date_utils.month_delta(m, 1) <NEW_LINE> rm = date_utils.month_delta(rm, 1) <NEW_LINE> ra.append(float(assets - 1)) <NEW_LINE> da.append(date_utils.parse_str(m + '01')) <NEW_LINE> <DEDENT> pf.plot_returns(pd.Series(ra, da)) <NEW_LINE> pyplot.show() <NEW_LINE> return assets
Pick the broker_top_n brokers with the best performance over the past month_to_judge months, then buy, in equal amounts, the shares they recommend at least min_recommend_num times. Backtest on 2021-06-26 with (202001, 202106, 2, 10, 3): peak return 1.6, final 1.4. Works well in an institution-driven bull market and even dodged the crash at the start of the year, but performs poorly in a sector-rotation market. :param from_month: :param to_month: :param month_to_judge: :param broker_top_n: :param min_recommend_num: :return:
625941b550485f2cf553cb96
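A minimal usage sketch reproducing the backtest mentioned in the docstring; months are passed as 'YYYYMM' strings because the code concatenates month + '01' to build dates:

final_assets = top_share_in_best_broker('202001', '202106',
                                        month_to_judge=2,
                                        broker_top_n=10,
                                        min_recommend_num=3)
print(final_assets)   # cumulative asset multiple over the backtest window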
def to_dict(self): <NEW_LINE> <INDENT> result = {} <NEW_LINE> for attr, _ in six.iteritems(self.swagger_types): <NEW_LINE> <INDENT> value = getattr(self, attr) <NEW_LINE> if isinstance(value, list): <NEW_LINE> <INDENT> result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) <NEW_LINE> <DEDENT> elif hasattr(value, "to_dict"): <NEW_LINE> <INDENT> result[attr] = value.to_dict() <NEW_LINE> <DEDENT> elif isinstance(value, dict): <NEW_LINE> <INDENT> result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> result[attr] = value <NEW_LINE> <DEDENT> <DEDENT> if issubclass(ScreenRecordingHeatmapIndexRequest, dict): <NEW_LINE> <INDENT> for key, value in self.items(): <NEW_LINE> <INDENT> result[key] = value <NEW_LINE> <DEDENT> <DEDENT> return result
Returns the model properties as a dict
625941b5090684286d50eadd
def detailed_xml(self, doc): <NEW_LINE> <INDENT> carNode = doc.createElement("car") <NEW_LINE> carNode.setAttribute("nb_doors", str(self.__nb_doors)) <NEW_LINE> carNode.setAttribute("weight", str(1000)) <NEW_LINE> nameNode = doc.createElement("name") <NEW_LINE> carNode.appendChild(nameNode) <NEW_LINE> nameNode.appendChild(doc.createCDATASection(self.__name)) <NEW_LINE> brandNode = doc.createElement("brand") <NEW_LINE> carNode.appendChild(brandNode) <NEW_LINE> brandNode.appendChild(doc.createCDATASection(u"\u00a9" + self.__brand)) <NEW_LINE> yearNode = doc.createElement("year") <NEW_LINE> yearNode.appendChild(doc.createTextNode("2015")) <NEW_LINE> carNode.appendChild(yearNode) <NEW_LINE> return carNode
Return an XML DOM element with the attributes and values.
625941b58c3a8732951581bb
def github_repo_role(name, rawtext, text, lineno, inliner, options={}, content=[]): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> repo_user, repo_name = text.split('/') <NEW_LINE> repo = gh.repository(repo_user, repo_name) <NEW_LINE> <DEDENT> except Exception as e: <NEW_LINE> <INDENT> msg = inliner.reporter.error( 'GitHub API error: %s for "%s"' % (e, text), line=lineno) <NEW_LINE> prb = inliner.problematic(rawtext, rawtext, msg) <NEW_LINE> return [prb], [msg] <NEW_LINE> <DEDENT> tpl = gh_repo_tpl <NEW_LINE> html = tpl.format(**repo.__dict__) <NEW_LINE> title = nodes.paragraph() <NEW_LINE> title += nodes.inline('', repo_name + ': ') <NEW_LINE> title += nodes.reference('', 'github', refuri=repo.html_url) <NEW_LINE> return [title], []
Docutils role that resolves a 'user/repo' reference through the GitHub API and renders a link to the repository.
625941b5dc8b845886cb5332
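A hedged sketch of wiring the role into docutils; the role name 'github-repo' is an assumption, not taken from the source:

from docutils.parsers.rst import roles
roles.register_local_role('github-repo', github_repo_role)
# in reST:  :github-repo:`octocat/Hello-World`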
def test_create_operation_definition(self): <NEW_LINE> <INDENT> with self.subTest("with no contained resources or parameters"): <NEW_LINE> <INDENT> op_def = creators.create_operation_definition(name="name.of.operation", code="code.of.operation", date_time="2019-04-23 09:00:04.159338", parameters=[], contained=[]) <NEW_LINE> self.assertIsInstance(op_def, OperationDefinition) <NEW_LINE> self.assertEqual(op_def.name, "name.of.operation") <NEW_LINE> self.assertEqual(op_def.code, "code.of.operation") <NEW_LINE> self.assertEqual(op_def.date.as_json(), "2019-04-23 09:00:04.159338") <NEW_LINE> self.assertEqual(op_def.contained, None) <NEW_LINE> self.assertEqual(op_def.parameter, None) <NEW_LINE> <DEDENT> with self.subTest("with one parameter"): <NEW_LINE> <INDENT> some_param = creators.create_parameter_with_binding(name="SOME_NAME", value="SOME_VALUE") <NEW_LINE> op_def = creators.create_operation_definition(name="name.of.operation", code="code.of.operation", date_time="2019-04-23 09:00:04.159338", parameters=[some_param], contained=[]) <NEW_LINE> self.assertEqual(len(op_def.parameter), 1) <NEW_LINE> self.assertIsInstance(op_def.parameter[0], OperationDefinitionParameter) <NEW_LINE> <DEDENT> with self.subTest("with one contained resource"): <NEW_LINE> <INDENT> some_resource = creators.create_practitioner_resource(resource_id="some_id", national_identifier="some_nat_id", local_identifier="some_local_id") <NEW_LINE> op_def = creators.create_operation_definition(name="name.of.operation", code="code.of.operation", date_time="2019-04-23 09:00:04.159338", parameters=[], contained=[some_resource]) <NEW_LINE> self.assertEqual(len(op_def.contained), 1) <NEW_LINE> self.assertIsInstance(op_def.contained[0], Practitioner)
Tests the helper function to create an operation definition
625941b521a7993f00bc7ae6
def addPageTemplates(self,pageTemplates): <NEW_LINE> <INDENT> if not isSeq(pageTemplates): <NEW_LINE> <INDENT> pageTemplates = [pageTemplates] <NEW_LINE> <DEDENT> for t in pageTemplates: <NEW_LINE> <INDENT> self.pageTemplates.append(t)
add one or a sequence of pageTemplates
625941b5097d151d1a222c5a
def get_month_salary(annual_salary): <NEW_LINE> <INDENT> return annual_salary / 12
Returns monthly salary = annual salary / 12
625941b594891a1f4081b8a5
def refine_ellipse(image, params, mode='ellipse_aligned', n=None, rad_range=None, maxfit_size=2, spline_order=3, threshold=0.1): <NEW_LINE> <INDENT> if not np.all([x > 0 for x in params]): <NEW_LINE> <INDENT> raise ValueError("All yc, xc, yr, xr params should be positive") <NEW_LINE> <DEDENT> assert image.ndim == 2 <NEW_LINE> yr, xr, yc, xc = params <NEW_LINE> if rad_range is None: <NEW_LINE> <INDENT> rad_range = (-min(yr, xr) / 2, min(yr, xr) / 2) <NEW_LINE> <DEDENT> intensity, pos, normal = unwrap_ellipse(image, params, rad_range, n) <NEW_LINE> r_dev = max_linregress(intensity, maxfit_size, threshold) + rad_range[0] <NEW_LINE> coord_new = to_cartesian(r_dev, pos, normal) <NEW_LINE> radius, center, _ = fit_ellipse(coord_new, mode=mode) <NEW_LINE> return tuple(radius) + tuple(center), coord_new.T
Interpolates the image along lines perpendicular to the ellipse. The maximum along each line is found using linear regression of the discrete derivative. Parameters ---------- image : 2d numpy array of numbers Image indices are interpreted as (y, x) params : yr, xr, yc, xc mode : {'ellipse', 'ellipse_aligned', 'circle'} n: integer number of points on the ellipse that are used for refinement rad_range: tuple of floats length of the line (distance inwards, distance outwards) maxfit_size: integer pixels around maximum pixel that will be used in linear regression spline_order: integer interpolation order for edge cross-sections threshold: float a threshold is calculated based on the global maximum; fit regions are rejected if their average value is lower than this threshold Returns ------- yr, xr, yc, xc
625941b5a219f33f34628773
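A minimal usage sketch, assuming a 2-D image and a rough initial ellipse; the numbers are hypothetical:

initial = (20.0, 30.0, 64.0, 64.0)                        # yr, xr, yc, xc in pixels
refined, edge_coords = refine_ellipse(image, initial, mode='ellipse_aligned')
yr, xr, yc, xc = refined                                  # refined radii and center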
def __init__(self, configFileName): <NEW_LINE> <INDENT> self._logger = logging.getLogger(__name__) <NEW_LINE> logging.config.fileConfig("logging.ini", disable_existing_loggers=False) <NEW_LINE> self.configFileName = configFileName <NEW_LINE> self.c = ConfigParser() <NEW_LINE> self.configFile = open(configFileName, "r+") <NEW_LINE> self.c.readfp(self.configFile) <NEW_LINE> self.configFile.close()
Default Constructor
625941b592d797404e303f88
def scrap_the_world_bank_ord_data(url): <NEW_LINE> <INDENT> chromedriver = "/Applications/Internet Software/chromedriver" <NEW_LINE> driver = webdriver.Chrome(chromedriver) <NEW_LINE> driver.get(url) <NEW_LINE> download_csv = driver.find_element_by_xpath('//*[@id="mainChart"]/aside/div/div[2]/div/p/a[1]') <NEW_LINE> download_csv.click() <NEW_LINE> driver.quit()
Download the csv file from The World Bank Organization. Input argument: url as a string
625941b59f2886367277a68f
def fit(self, X, X_plot_projection = None): <NEW_LINE> <INDENT> if (self.method == "exp_local_scaling"): <NEW_LINE> <INDENT> border_func = lambda data: rknn_with_distance_transform(data, self.k, exp_local_scaling_transform) <NEW_LINE> <DEDENT> result = border_peel(X, border_func, None, max_iterations=self.max_iterations, mean_border_eps=self.mean_border_eps, plot_debug_output_dir=self.plot_debug_output_dir, k=self.k, precentile=self.border_precentile, dist_threshold=self.dist_threshold, link_dist_expansion_factor=self.link_dist_expansion_factor, verbose=self.verbose, vis_data=X_plot_projection, min_cluster_size=self.min_cluster_size, stopping_precentile=self.stopping_precentile, should_merge_core_points=self.merge_core_points, debug_marker_size=self.debug_marker_size) <NEW_LINE> self.labels_, self.core_points, self.non_merged_core_points, self.data_sets_by_iterations, self.associations, self.link_thresholds, self.border_values_per_iteration, self.core_points_indices = result <NEW_LINE> return self
Perform BorderPeel clustering from features Parameters ---------- X : array of features (TODO: make it work with sparse arrays) X_plot_projection : A projection of the data to 2D used for plotting the graph during the clustering process
625941b54d74a7450ccd3fc0
def CredibleInterval(self, percentage=90): <NEW_LINE> <INDENT> cdf = self.MakeCdf() <NEW_LINE> return cdf.CredibleInterval(percentage)
Compute the central credible interval. For example, percentage=90 computes the 90% CI. :param percentage: float between 0 and 100 :return: a sequence of two floats, low and high
625941b526238365f5f0ec67
def next_player(self): <NEW_LINE> <INDENT> current_player = self.current_player() <NEW_LINE> current_player.ball.turn = False <NEW_LINE> tries = len(self.players) <NEW_LINE> next_id = (self.players.index(current_player) + 1) % len(self.players) <NEW_LINE> while self.players[next_id].ball.state is BallState.IN_CUP and tries > 0: <NEW_LINE> <INDENT> next_id = (next_id + 1) % len(self.players) <NEW_LINE> tries -= 1 <NEW_LINE> <DEDENT> if tries > 0: <NEW_LINE> <INDENT> self.players[next_id].ball.turn = True <NEW_LINE> self.next_turn = False <NEW_LINE> print("Player {} to move".format(next_id)) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> print("Next map") <NEW_LINE> self.next_map() <NEW_LINE> <DEDENT> self.show_score()
Change turn for next player
625941b50a366e3fb873e614
def on_end(self, resource_id): <NEW_LINE> <INDENT> pass
Remove the hosts from the pool.
625941b5e5267d203edcda9f
def get_title(text): <NEW_LINE> <INDENT> pattern = r'(?<=<title>).*(?=</title>)' <NEW_LINE> match = re.search(pattern, text) <NEW_LINE> if match: <NEW_LINE> <INDENT> return match.group(0) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return ''
Returns the title of a given html document
625941b5b5575c28eb68ddfb
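A quick usage sketch of the regex above:

print(get_title('<html><head><title>Hello</title></head></html>'))   # 'Hello'
print(get_title('<p>no title here</p>'))                              # ''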
def test_equal_on_not_equal_nonce(self): <NEW_LINE> <INDENT> a = objects.AttestationCredential( nonce=objects.Nonce( nonce_id=b'\x01', nonce_value=b'\x00\x01\x02\x03\x04\x05\x06\x07' ) ) <NEW_LINE> b = objects.AttestationCredential( nonce=objects.Nonce( nonce_id=b'\x02', nonce_value=b'\x07\x06\x05\x04\x03\x02\x01\x00' ) ) <NEW_LINE> self.assertFalse(a == b) <NEW_LINE> self.assertFalse(b == a)
Test that the equality operator returns False when comparing two AttestationCredential structs with different nonce values.
625941b5d99f1b3c44c67396
def test_app_file(self): <NEW_LINE> <INDENT> res = self.app.get("/README.txt") <NEW_LINE> self.assertEqual(res.text, "Hello, world!")
Test that we can download a file.
625941b5b57a9660fec3367d
def _set_requirements(self): <NEW_LINE> <INDENT> self._requirements = None <NEW_LINE> if self._file_type == 'unknown' or self._content is None: <NEW_LINE> <INDENT> return <NEW_LINE> <DEDENT> if self._file_type == 'javascript': <NEW_LINE> <INDENT> require_re = re.compile(r'[ \t]*//=[ \t]*require[ \t]+(\S+)[ \t]*\n?') <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> require_re = re.compile(r'@import url\(\"(?P<import_match>\S+)\.\S+"\)|[ \t]*/\*=[ \t]*require[ \t]+(?P<require_match>\S+)[ \t]*\*/\n?') <NEW_LINE> <DEDENT> for result in require_re.finditer(self._content): <NEW_LINE> <INDENT> if self._file_type == 'javascript': <NEW_LINE> <INDENT> name = str(result.groups()[0]) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> if result.group('import_match') is not None: <NEW_LINE> <INDENT> name = str(result.group('import_match')) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> name = str(result.group('require_match')) <NEW_LINE> <DEDENT> <DEDENT> insert_position = (result.start(), result.end()) <NEW_LINE> if self._requirements is None: <NEW_LINE> <INDENT> self._requirements = [] <NEW_LINE> <DEDENT> self._requirements.append(Requirement(name, insert_position))
Parse the content of the file and set the _requirements member variable to a list of Requirement objects. Remarks: Must be called AFTER _set_content() is called and after self._file_type is set.
625941b555399d3f055884b1
def main(): <NEW_LINE> <INDENT> micro_controller = initiate_arduino() <NEW_LINE> root = Tk() <NEW_LINE> root.title("Sensor Data Collector") <NEW_LINE> output_text = Text(root) <NEW_LINE> scrollbar = Scrollbar(root) <NEW_LINE> entry = Entry(root) <NEW_LINE> button = Button(root, text="send", command=lambda: determine_input(entry, root, output_text, micro_controller)) <NEW_LINE> output_text.grid(row=0, column=0) <NEW_LINE> scrollbar.grid(row=0, column=1, sticky='ns') <NEW_LINE> entry.grid(row=1, column=0, sticky='we') <NEW_LINE> button.grid(row=1, column=1) <NEW_LINE> root.resizable(width=False, height=False) <NEW_LINE> root.bind('<Return>', (lambda event: determine_input(entry, root, output_text, micro_controller))) <NEW_LINE> output_text.delete('1.0', END) <NEW_LINE> output_text.insert(END, "Welcome to the Sensor Data Collector made by Magor Katay and Tijs van Lieshout! \n" "The following commands are accepted: \n\n" "help, ? or menu \t\t\t\t Shows this menu \n" "start (temp | light | all) <interval_in_seconds> \t\t\t Starts the data collection \n" "stop \t\t\t\t Stops the data collection \n" "list \t\t\t\t Lists the CSV files of collected data \n" "display <ID of CSV file> \t\t\t\t displays a csv file with some stats \n" "wait \t\t\t\t Toggle to wait for input on the Arduino \n") <NEW_LINE> root.mainloop()
The main function which sets up the GUI
625941b55fdd1c0f98dc0030
def compute_positions_shift_with_entity_tokens( mention_mask: tf.Tensor, mention_batch_positions: tf.Tensor, mention_start_positions: tf.Tensor, mention_end_positions: tf.Tensor, batch_size: int, old_length: int, ) -> tf.Tensor: <NEW_LINE> <INDENT> old_shape = (batch_size, old_length) <NEW_LINE> def get_positions_shift(positions: tf.Tensor, exclusive: bool) -> tf.Tensor: <NEW_LINE> <INDENT> index_2d = _get_2d_index(mention_batch_positions, positions) <NEW_LINE> return tf.cumsum( tf.scatter_nd(index_2d, mention_mask, old_shape), axis=1, exclusive=exclusive) <NEW_LINE> <DEDENT> positions = _batched_range(batch_size, old_length, 0, mention_mask.dtype) <NEW_LINE> positions += get_positions_shift(mention_start_positions, exclusive=False) <NEW_LINE> positions += get_positions_shift(mention_end_positions, exclusive=True) <NEW_LINE> return positions
Computes the new position for every position in the old sequence.
625941b5baa26c4b54cb0f22
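A NumPy illustration of the shift rule the TF code implements (a start token inserted before each mention start shifts the start position itself, an end token inserted after each mention end shifts only later positions); the helper below is an assumption for illustration, not the source's `_get_2d_index`/`_batched_range`:

import numpy as np

def positions_shift_np(mask, batch_pos, start_pos, end_pos, batch_size, old_length):
    starts = np.zeros((batch_size, old_length), dtype=int)
    ends = np.zeros((batch_size, old_length), dtype=int)
    for m, b in enumerate(batch_pos):
        starts[b, start_pos[m]] += mask[m]
        ends[b, end_pos[m]] += mask[m]
    shift_start = np.cumsum(starts, axis=1)        # inclusive cumsum, like exclusive=False
    shift_end = np.cumsum(ends, axis=1) - ends     # exclusive cumsum, like exclusive=True
    return np.arange(old_length) + shift_start + shift_end

# one sequence of length 6 with a single mention spanning positions 2..3:
# old positions [0, 1, 2, 3, 4, 5] map to [0, 1, 3, 4, 6, 7]
print(positions_shift_np([1], [0], [2], [3], batch_size=1, old_length=6))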
def __init__(self, **kwargs): <NEW_LINE> <INDENT> super().__init__(**kwargs) <NEW_LINE> fname = self.kwargs.get('gmpe_table', self.gmpe_table) <NEW_LINE> with h5py.File(fname, "r") as fle: <NEW_LINE> <INDENT> self.distance_type = decode(fle["Distances"].attrs["metric"]) <NEW_LINE> self.REQUIRES_DISTANCES = set([self.distance_type]) <NEW_LINE> self.m_w = fle["Mw"][:] <NEW_LINE> self.distances = fle["Distances"][:] <NEW_LINE> self.imls = hdf_arrays_to_dict(fle["IMLs"]) <NEW_LINE> self.DEFINED_FOR_INTENSITY_MEASURE_TYPES = set( self._supported_imts()) <NEW_LINE> if "SA" in self.imls and "T" not in self.imls: <NEW_LINE> <INDENT> raise ValueError("Spectral Acceleration must be accompanied by" " periods") <NEW_LINE> <DEDENT> self._setup_standard_deviations(fle) <NEW_LINE> if "Amplification" in fle: <NEW_LINE> <INDENT> self._setup_amplification(fle)
Executes the preprocessing steps at the instantiation stage to read in the tables from hdf5 and hold them in memory.
625941b53617ad0b5ed67cfd
def to_graph(self, nodes=None, message=None): <NEW_LINE> <INDENT> sb = [] <NEW_LINE> sb.append('# Legend:') <NEW_LINE> sb.append('# Thread 1 -> Lock 1 indicates Thread 1 is waiting on Lock 1') <NEW_LINE> sb.append('# Lock 2 -> Thread 2 indicates Lock 2 is held by Thread 2') <NEW_LINE> if message is not None: <NEW_LINE> <INDENT> sb.append(message) <NEW_LINE> <DEDENT> sb.append('digraph "mongod+lock-status" {') <NEW_LINE> for node_key in self.nodes: <NEW_LINE> <INDENT> for next_node_key in self.nodes[node_key]['next_nodes']: <NEW_LINE> <INDENT> sb.append(' "{}" -> "{}";'.format(self.nodes[node_key]['node'], self.nodes[next_node_key]['node'])) <NEW_LINE> <DEDENT> <DEDENT> for node_key in self.nodes: <NEW_LINE> <INDENT> color = "" <NEW_LINE> if nodes and node_key in nodes: <NEW_LINE> <INDENT> color = "color = red" <NEW_LINE> <DEDENT> escaped_label = str(self.nodes[node_key]['node']).replace('"', '\\"') <NEW_LINE> sb.append(' "{}" [label="{}" {}]'.format(self.nodes[node_key]['node'], escaped_label, color)) <NEW_LINE> <DEDENT> sb.append("}") <NEW_LINE> return "\n".join(sb)
Return a Graphviz DOT digraph of the lock wait-for relationships: an edge from a thread to a lock means the thread is waiting on that lock, and an edge from a lock to a thread means the lock is held by that thread.
625941b5fbf16365ca6f5fb9
def test_node_register_JSON_metadata(self): <NEW_LINE> <INDENT> api.node_register('node-99', obm={ "type": "http://schema.massopencloud.org/haas/v0" "/obm/ipmi", "host": "ipmihost", "user": "root", "password": "tapeworm"}, metadata={ "EK": {"val1": 1, "val2": 2} }) <NEW_LINE> api.get_or_404(model.Node, 'node-99')
...and with the metadata being something other than a string.
625941b545492302aab5e0be
def multi_indep_sim(num_samp, num_dim, prob=0.5, sep1=3, sep2=2): <NEW_LINE> <INDENT> sig = np.diag(np.ones(shape=(num_dim))) <NEW_LINE> u = np.random.multivariate_normal( cov=sig, mean=np.zeros(num_dim), size=num_samp) <NEW_LINE> v = np.random.multivariate_normal( cov=sig, mean=np.zeros(num_dim), size=num_samp) <NEW_LINE> u_2 = np.random.binomial(1, p=prob, size=(num_samp, 1)) <NEW_LINE> v_2 = np.random.binomial(1, p=prob, size=(num_samp, 1)) <NEW_LINE> x = u/sep1 + sep2*u_2 - 1 <NEW_LINE> y = v/sep1 + sep2*v_2 - 1 <NEW_LINE> return x, y
Function for generating a multimodal independence simulation. :param num_samp: number of samples for the simulation :param num_dim: number of dimensions for the simulation :param prob: the binomial probability, defaults to 0.5 :param sep1: determines the size and separation of clusters, defaults to 3 :param sep2: determines the size and separation of clusters, defaults to 2 :return: the data matrix and a response array
625941b555399d3f055884b2
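A minimal usage sketch:

x, y = multi_indep_sim(num_samp=100, num_dim=2)
print(x.shape, y.shape)   # (100, 2) (100, 2) -- two independent multimodal samples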
def test_update(self): <NEW_LINE> <INDENT> rectangle9 = Rectangle(1, 1, 1, 1) <NEW_LINE> rectangle9.update(55, 4, 3, 2, 1) <NEW_LINE> self.assertEqual(rectangle9.id, 55) <NEW_LINE> self.assertEqual(rectangle9.width, 4) <NEW_LINE> self.assertEqual(rectangle9.height, 3) <NEW_LINE> self.assertEqual(rectangle9.x, 2) <NEW_LINE> self.assertEqual(rectangle9.y, 1) <NEW_LINE> rectangle10 = Rectangle(2, 2, 2, 2) <NEW_LINE> rectangle10.update(width=1, x=9, height=7, y=8, id=56) <NEW_LINE> self.assertEqual(rectangle10.id, 56) <NEW_LINE> self.assertEqual(rectangle10.width, 1) <NEW_LINE> self.assertEqual(rectangle10.height, 7) <NEW_LINE> self.assertEqual(rectangle10.x, 9) <NEW_LINE> self.assertEqual(rectangle10.y, 8)
Test 'Rectangle' class' public 'update' function
625941b57cff6e4e81117784
@is_editor_or_ed_reviewer <NEW_LINE> def view_non_editorial_review(request, review_id, non_editorial_review_id): <NEW_LINE> <INDENT> review = get_object_or_404( models.EditorialReview, pk=review_id, completed__isnull=True, ) <NEW_LINE> if review.content_type.model == 'proposal': <NEW_LINE> <INDENT> peer_review = get_object_or_404( submission_models.ProposalReview.objects, pk=non_editorial_review_id, completed__isnull=False ) <NEW_LINE> submission = peer_review.proposal <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> peer_review = get_object_or_404( core_models.ReviewAssignment.objects, pk=non_editorial_review_id, completed__isnull=False ) <NEW_LINE> submission = peer_review.book <NEW_LINE> <DEDENT> result = peer_review.results <NEW_LINE> relations = review_models.FormElementsRelationship.objects.filter( form=result.form, ) <NEW_LINE> data_ordered = core_logic.order_data( core_logic.decode_json(result.data), relations, ) <NEW_LINE> template = 'editorialreview/view_non_editorial_review.html' <NEW_LINE> context = { 'review': review, 'peer_review': peer_review, 'data_ordered': data_ordered, 'relations': relations, 'result': result, 'submission': submission, } <NEW_LINE> return render(request, template, context)
As an editorial reviewer, view a completed peer review for the submission under review.
625941b5d58c6744b4257a5f
def get_channel_id_by_name(self, channel_name): <NEW_LINE> <INDENT> assert isinstance(channel_name, str), "`channel_name` must be a valid channel name rather than \"{}\"".format(channel_name) <NEW_LINE> channel_name = channel_name.strip().lstrip("#") <NEW_LINE> match = re.match(r"<#(\w+)(?:\|[^>]+)?>$", channel_name) <NEW_LINE> if match: return match.group(1) <NEW_LINE> for entry in self.client.server.channels: <NEW_LINE> <INDENT> if entry.name == channel_name: return entry.id <NEW_LINE> <DEDENT> return None
Returns the ID of the channel with name `channel_name`, or `None` if there are no channels with that name. Channels include public channels, direct messages with other users, and private groups.
625941b524f1403a92600968
def compare( self, other: "Series", align_axis: Union[str, int] = 1, keep_shape: bool = False, keep_equal: bool = False, ): <NEW_LINE> <INDENT> if not isinstance(other, Series): <NEW_LINE> <INDENT> raise TypeError(f"Cannot compare Series to {type(other)}") <NEW_LINE> <DEDENT> result = self.to_frame().compare( other.to_frame(), align_axis=align_axis, keep_shape=keep_shape, keep_equal=keep_equal, ) <NEW_LINE> if align_axis == "columns" or align_axis == 1: <NEW_LINE> <INDENT> result.columns = pandas.Index(["self", "other"]) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> result = result.squeeze().rename(None) <NEW_LINE> <DEDENT> return result
Compare to another Series and show the differences.
625941b59f2886367277a690
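A usage sketch with the pandas-style API this method mirrors, assuming pd refers to the same Series implementation:

s1 = pd.Series([1, 2, 3, 4])
s2 = pd.Series([1, 9, 3, 0])
s1.compare(s2)                                    # rows 1 and 3, columns 'self' and 'other'
s1.compare(s2, keep_shape=True, keep_equal=True)  # all rows, equal values kept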
def view_annual_monthly_report(self, row, col): <NEW_LINE> <INDENT> if col == 0: <NEW_LINE> <INDENT> item = self.annual_report_widget.content_table.item(row, col) <NEW_LINE> title = item.text() <NEW_LINE> file_url = STATIC_URL + item.data(Qt.UserRole).get("filepath", 'no-found.pdf') <NEW_LINE> p = PDFContentPopup(file=file_url, title=title) <NEW_LINE> p.exec_()
View the details of annual and semi-annual reports
625941b5d486a94d0b98df4d